// hip_filename: 13cbb321e2adbf5addb0e0eed127177e590afaaf.hip
// !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
//#include <cutil.h>
#include <iostream>
#include <ostream>
#include <fstream>
//#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h"
using namespace std;
#define CASENAME "Test31"
#define BLOCKSIZEX 128
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 64
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define XDIM 128
#define YDIM 128
#define ZDIM 64
#define TMAX 100
#define STARTF 0
#define OBSTR1 4.f
#define OBSTX1 31.5f
#define OBSTY1 31.5f
#define OBSTZ1 15.5f
#define OBSTR2 4.f
#define OBSTX2 63.5f
#define OBSTY2 31.5f
#define OBSTZ2 31.5f
#define RE 100.f//2000.f//100.f;
#define UMAX 0.08f
#define METHOD "SINGLE" //SINGLE,HYB,TEXT,SHARED,CACHE
#define SmagLES "NO" //YES,NO
#define MODEL "BGK" //BGK,MRT,STREAM
#define ZPERIODIC "NO"
#define CS 0.04f
//#define CHARLENGTH = XDIM-2.f;
//#define BLOCKSIZE 16;
//int const XDIM = 32;
//int const YDIM = 32;
#include <sys/time.h>
#include <time.h>
/*
Image List:
0 fluid
1 BB
2
3 DirichletWest(simple)
10 BB(force)
13 DirichletWest_Reg
14 NeumannEast_Reg
15 DirichletNorth_Reg
16 DirichletSouth_Reg
21 ysymmetry_top
22 ysymmetry_bot
23 zsymmetry_top
24 zsymmetry_bot
25 xsymmetry_top
26 xsymmetry_bot
*/
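// Geometry flag for real-valued coordinates: returns 10 (bounce-back with force
// accumulation, per the image list above) inside the square column
// |x-OBSTX1| < OBSTR1 && |y-OBSTY1| < OBSTR1, and 0 (fluid) everywhere else.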
inline __device__ int ImageFcn(float x, float y, float z){
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// return 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// return 10;
//if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1)
// {
// return 10;
// }
// else
// //if(y < 0.1f || z < 0.1f || (XDIM-x) < 0.1f || (YDIM-y) < 0.1f || (ZDIM-z) < 0.1f)
// if(y < 17.5f || z < 17.5f || y > 46.5f || z > 46.5f)
// return 1;
// else if(x < 17.5f)
// return 13;
// else if(x > 78.5f)
// return 14;
// else
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
return 10;
else
return 0;
}
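// Geometry flag for lattice (integer) coordinates: the y == 0 face is tagged 200 and the
// y == YDIM-1 face 100, the x == 0 and x == XDIM-1 faces are plain bounce-back (1),
// and all remaining nodes are fluid (0).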
inline __device__ int ImageFcn(int x, int y, int z){
int value = 0;
//Cylinder
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// value = 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// value = 10;
//Sphere
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1)
// {
//// if(z == 0 || z == ZDIM-1)
//// return 1;
//// else
// return 10;
// }
// if(z == 0)
// value = 0;
// else if(z == ZDIM-1)
// value = 0;
// if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
// value = 10;
// else if(y == 0)
// value = 200;//22;
// else if(y == YDIM-1)
// value = 100;
// else if(x == 0)
// value = 26;
// else if(x == XDIM-1)
// value = 25;
// else if(z == 0)
// value = 0;
// else if(z == ZDIM-1)
// value = 0;
//return value;
//Lid Driven Cavity
// if(y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1)
// value = 1;
// else if(x == XDIM-2 || y == 1 || y == YDIM-2 || z == 1 || z == ZDIM-2)
// return 1;
// else if(x == 0)
// return 1;
// if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
// value = 10;
if(y == 0)
value = 200;//22;
else if(y == YDIM-1)
value = 100;
else if(x == 0)
value = 1;
else if(x == XDIM-1)
value = 1;
// else if(x == 0)
// return 53;
// else if(x == XDIM-1)
// return 54;
return value;
}
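// Normalized parabolic (Poiseuille-type) profile: 0 at the wall coordinate x = 0.5 and 1 at
// the centerline. The inlet routines below keep the call commented out and use a flat u = UMAX.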
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-1)*0.5f;
float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
// return 1.f;
}
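// West (x = 0) velocity inlet: edge and corner nodes are first patched by reflecting the
// unknown populations, then the five populations entering from the west (f1, f5, f8, f10, f15)
// are set from their opposite-direction counterparts plus the prescribed u = UMAX
// (a bounce-back-of-non-equilibrium style condition).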
__device__ void DirichletWest(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(y == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(y == YDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
// if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(zcoord)*1.5;
v = 0.0f;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float usqr = u*u+v*v+w*w;
f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);;
f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
//// f0 = 1.0f/3.0f*(rho-1.5f*usqr);
// f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
//// f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
//// f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
//// f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
//// f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
//// f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
//// f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
//// f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr);
//// f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
//// f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr);
//// f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
//// f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
//// f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
//// f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
__device__ void DirichletWest_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == YDIM-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(y)*1.5;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f1 = f3+0.33333333f*u;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f5 = f7+0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f10= f17+0.166666667f*(u+w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f15= f12+0.166666667f*(u-w);
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
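// Regularized variant of the west inlet: the density is estimated from the known populations
// and the prescribed u, then every population is rebuilt from its equilibrium plus the
// second-order projection of the non-equilibrium stress tensor PI (a regularized boundary
// treatment).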
void __device__ DirichletWest_Regularized(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float PI11 = 0;
float PI12 = 0;
float PI22 = 0;
float PI33 = 0;
float PI13 = 0;
float PI23 = 0;
float u;//,v;//,w;//,rho;
u = UMAX;//*PoisProf(z)*1.5;
//v = 0.0f;
//w = 0.0f;
float usqr = u*u;//+v*v+w*w;
float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho -1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho -1.5f*usqr);
float feq9 = 0.0555555556f*(rho -1.5f*usqr);
float feq14 = 0.0555555556f*(rho -1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho -1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho -1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho -1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho -1.5f*usqr);
// float feq0 = 0.3333333333f*(rho-1.5f*usqr);
// float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
// float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
// float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
// float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
// float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
// float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
// float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f1 = feq1 +f3 -feq3 ;
f5 = feq5 +f7 -feq7 ;
f8 = feq8 +f6 -feq6 ;
f10= feq10+f17-feq17;
f15= feq15+f12-feq12;
PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
}
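// Regularized east (x = XDIM-1) outlet: the density is fixed at rho = 1, the normal velocity u
// is recovered from the known populations, and the distribution is rebuilt with the same
// regularized (equilibrium + PI projection) procedure as the west inlet.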
void __device__ NeumannEast_Regularized(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
f13 = f11;
f18 = f16;
f8 = f5;
}
else if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float PI11 = 0;
float PI12 = 0;
float PI22 = 0;
float PI33 = 0;
float PI13 = 0;
float PI23 = 0;
float u;//,v;//,w;//,rho;
float rho = 1.0f;
//v = 0.0f;
//w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
float usqr = u*u;//+v*v+w*w;
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho -1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho -1.5f*usqr);
float feq9 = 0.0555555556f*(rho -1.5f*usqr);
float feq14 = 0.0555555556f*(rho -1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho -1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho -1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho -1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho -1.5f*usqr);
// float feq0 = 0.3333333333f*(rho-1.5f*usqr);
// float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq5 = 0.0277777778f*(rho+3.0f*( u+v)+4.5f*( u+v)*( u+v)-1.5f*usqr);
// float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// float feq8 = 0.0277777778f*(rho+3.0f*( u-v)+4.5f*( u-v)*( u-v)-1.5f*usqr);
// float feq10 = 0.0277777778f*(rho+3.0f*( u+w)+4.5f*( u+w)*( u+w)-1.5f*usqr);
// float feq11 = 0.0277777778f*(rho+3.0f*( v+w)+4.5f*( v+w)*( v+w)-1.5f*usqr);
// float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// float feq15 = 0.0277777778f*(rho+3.0f*( u-w)+4.5f*( u-w)*( u-w)-1.5f*usqr);
// float feq16 = 0.0277777778f*(rho+3.0f*( v-w)+4.5f*( v-w)*( v-w)-1.5f*usqr);
// float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f3 = feq3 +f1 -feq1 ;
f7 = feq7 +f5 -feq5 ;
f6 = feq6 +f8 -feq8 ;
f17= feq17+f10-feq10;
f12= feq12+f15-feq15;
PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
}
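// East outlet with rho = 1: after the edge/corner patching, the outflow velocity u is recovered
// from the known populations and every population is reset to its full equilibrium value
// (the bounce-back-of-non-equilibrium lines are left commented out).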
__device__ void NeumannEast(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
float u2 = u*u;
float v2 = v*v;
float w2 = w*w;
float usqr = u2+v2+w2;
// f3 = f1 -0.333333333f*u;
// f7 = f5 -0.166666667f*(u+v);
// f6 = f8 -0.166666667f*(u-v);
// f17= f10-0.166666667f*(u+w);
// f12= f15-0.166666667f*(u-w);
f0 = 1.0f/3.0f*(rho-1.5f*usqr);
f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
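// Lighter east outlet: rho = 1, u recovered from the known populations, and only the five
// populations entering from the east (f3, f7, f6, f17, f12) are corrected from their opposites.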
__device__ void NeumannEast_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f3 = f1 -0.333333333f*u;
f7 = f5 -0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f17= f10-0.166666667f*(u+w);
f12= f15-0.166666667f*(u-w);
// f3 =(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2)+
// (f1-(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2));
// f7 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v)+
// (f5-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v));
// f6 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v)+
// (f8-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v));
// f17=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w)+
// (f10-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w));
// f12=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w)+
// (f15-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w));
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
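// Moving-wall condition on the north face (y = YDIM-1): tangential velocity u = UMAX with
// v = w = 0; the populations entering from the north (f4, f7, f8, f13, f18) are reconstructed
// from their opposite-direction counterparts.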
__device__ void DirichletNorth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f4 = f2-0.33333333f*v;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f7 = f5-0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f13= f16-0.166666667f*(v-w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f18= f11-0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
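// Counterpart on the south face (y = 0): u = UMAX, v = w = 0, and the populations entering
// from the south (f2, f5, f6, f11, f16) are reconstructed from their opposites.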
__device__ void DirichletSouth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f2 = f4 +0.33333333f*v;
f5 = f7 +0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f16= f13+0.166666667f*(v-w);
f11= f18+0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
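// xsymmetry_bot / xsymmetry_top impose free-slip (specular-reflection) conditions on the
// x = 0 and x = XDIM-1 faces: after the usual y/z edge patching, the populations entering
// from the face are copied from their x-mirrored counterparts.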
__device__ void xsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
// if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
}
f1 = f3 ;
f5 = f6 ;
f8 = f7 ;
f10= f12;
f15= f17;
}
__device__ void xsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
f13 = f11;
f18 = f16;
f8 = f5;
}
// else if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
}
f3 = f1 ;
f6 = f5 ;
f7 = f8 ;
f12= f10;
f17= f15;
}
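// The y/z symmetry helpers below do the same for the y and z faces: each *_top routine fills
// the populations entering at the high-index face, each *_bot routine those at the low-index face.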
__device__ void ysymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
__device__ void ysymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
__device__ void zsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
__device__ void zsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
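// Boundary dispatch by node image code for the non-forcing nodes: 53 = regularized west inlet,
// 54 = regularized east outlet, 21/22 = y symmetry (top/bottom), 23/24 = z symmetry (top/bottom).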
inline __device__ void boundaries(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 53)//DirichletWest
{
//DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 54)//NeumannEast
{
//NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
// if(im == 4)//DirichletWest
// {
// NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
}
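// Same dispatch for force-accumulating nodes, but using the non-regularized inlet/outlet
// variants and additionally handling the x symmetry planes (25 = top, 26 = bottom).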
inline __device__ void boundaries_force(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 53)//DirichletWest
{
DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 54)//NeumannEast
{
NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
else if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 25)//xsymm top
{
xsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 26)//xsymm bot
{
xsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
}
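// Rebuilds the populations at the north boundary with the density forced to 1: the local
// velocity and the D3Q19 moments are computed from the current populations, then the inverse
// moment transform is applied with rho = 1 (the rho argument is overwritten).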
inline __device__ void North_Extrap(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float rho)
{
rho = 1.0f;
float u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
float v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
float w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
}
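// South-boundary counterpart: the density is taken from the local populations, the wall-normal
// velocity v is prescribed by the caller, u and w are set to zero, and the populations are
// rebuilt from the moments.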
inline __device__ void South_Extrap(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float v)
{
float rho,u,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = 0.f;//f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
w = 0.f;//f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
}
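// Host-side timing helper: writes the elapsed time x - y in seconds to *result and
// returns 1 if the difference is negative.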
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
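// Single-relaxation-time (BGK) collision: macroscopic rho, u, v, w are accumulated from the
// populations and each f_i is relaxed toward the second-order D3Q19 equilibrium at rate omega.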
inline __device__ void bgk_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
// f0 =(1.f-omega)*f0 +omega*(0.3333333333f*(rho-1.5f*usqr));
// f1 =(1.f-omega)*f1 +omega*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =(1.f-omega)*f2 +omega*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =(1.f-omega)*f3 +omega*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =(1.f-omega)*f4 +omega*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =(1.f-omega)*f5 +omega*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =(1.f-omega)*f6 +omega*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =(1.f-omega)*f7 +omega*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =(1.f-omega)*f8 +omega*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =(1.f-omega)*f9 +omega*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=(1.f-omega)*f10+omega*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=(1.f-omega)*f11+omega*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr));
// f12=(1.f-omega)*f12+omega*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=(1.f-omega)*f13+omega*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr));
// f14=(1.f-omega)*f14+omega*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=(1.f-omega)*f15+omega*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=(1.f-omega)*f16+omega*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=(1.f-omega)*f17+omega*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=(1.f-omega)*f18+omega*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
f0 -=omega*(f0 -0.3333333333f*(rho-1.5f*usqr));
f1 -=omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f2 -=omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
f3 -=omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
f4 -=omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
f5 -=omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
f6 -=omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
f7 -=omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
f8 -=omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
f9 -=omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
f10-=omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
f11-=omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
f12-=omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
f13-=omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
f14-=omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
f15-=omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
f16-=omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
f17-=omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
f18-=omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
}
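// Multiple-relaxation-time (MRT) collision: the D3Q19 moments m1..m18 are computed from the
// populations (with the equilibrium already subtracted for the stress-related moments); the
// SmagLES branch below appears intended to recompute omega from a Smagorinsky eddy viscosity.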
inline __device__ void mrt_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
float usqr = u*u+v*v+w*w;
// u = rho*u;
// v = rho*v;
// w = rho*w;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
if(SmagLES == "YES"){
//// float PI11 = -1.0f/38.0f*( (m1)+19.0f*omega* (m9));
//// float PI22 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11)));
//// float PI33 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11)));
// float PI11 = LRLEVEL*-0.026315789f*m1-0.5f *omega*m9;
// float PI22 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
// float PI33 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
// float PI12 = LRLEVEL*-1.5f*omega*m13;
// float PI23 = LRLEVEL*-1.5f*omega*m14;
// float PI13 = LRLEVEL*-1.5f*omega*m15;
// float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f;
// float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
// //float Cs = 0.01f;
// omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f);
// //omega = 1.0f/(1.0f/omega+3.f*CS*Smag*LRFACTOR*LRFACTOR);
// //omega = 1.0f/(1.0f*LRLEVEL/1.99983f-1.f+0.5f+3.f*CS*Smag*LRFACTOR);
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f;
float tau0 = 1.f/omega;
//float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR);
//float Smag = LRFACTOR*(sqrt(4.f/9.f*tau0*tau0+8.f*CS*LRFACTOR*Q)-2.f/3.f*tau0)/(4.f*CS*LRFACTOR*LRFACTOR);
//omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f);
//float tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q));
float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q));
omega = 1.f/tau;
//float tau = 3.f*nu0*LRFACTOR+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*LRFACTOR*LRFACTOR*Q)-tau0)*0.5f;
//omega = 1.f/tau;
}
f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
}
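//Same MRT collision as mrt_collide, but with the Smagorinsky constant passed in
//as an argument. Note that the active LES branch below still uses the CS macro;
//the Cs parameter is only referenced in the commented-out variants.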
inline __device__ void mrt_collide_LES(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega, float Cs)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
float usqr = u*u+v*v+w*w;
// u = rho*u;
// v = rho*v;
// w = rho*w;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
if(SmagLES == "YES"){
// float PI11 = -0.026315789f*m1-0.5f *omega*m9;
// float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
// float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
//
// float PI12 = -1.5f*omega*m13;
// float PI23 = -1.5f*omega*m14;
// float PI13 = -1.5f*omega*m15;
// float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
// omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
// float PI11 = LRLEVEL*-1.0f/38.0f*( (m1)+19.0f*omega* (m9));
// float PI22 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11)));
// float PI33 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11)));
// float PI12 = LRLEVEL*-1.5f*omega*m13;
// float PI23 = LRLEVEL*-1.5f*omega*m14;
// float PI13 = LRLEVEL*-1.5f*omega*m15;
// float nu0 = ((1.0f/omega)-0.5f)/3.0f;
// float Smag = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+PI12*PI12+PI23*PI23+PI13*PI13);
// omega = 1.0f/(3.0f*(nu0+Cs*Smag*LRLEVEL*LRLEVEL)+0.5f);
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//
//float Smag = (sqrt(nu0*nu0+18.f*CS*Q)-nu0)/(6.f*CS);
//
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
//
//float tau0 = 1.f/omega;
//float tau = 3.f*nu0+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*Q)-tau0)*0.5f;
//omega = 1.f/tau;
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float tau0 = 1.f/omega;
//float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR);
//float Smag = (sqrt(4.f/9.f*tau0*tau0+8.f*CS*Q)-2.f/3.f*tau0)/(4.f*CS);
//omega = 1.0f/(3.0f*(nu0+CS*Smag)+0.5f);
float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*sqrt(2.f)*CS*Q));
omega = 1.f/tau;
}
f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
}
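//Rescale distributions between grid levels (coarse-to-fine, per the _cf suffix)
//for the BGK model: the equilibrium part is kept and the non-equilibrium part
//is scaled by SF, i.e. f = feq + SF*(f-feq), written below as SF*f+(1-SF)*feq.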
inline __device__ void bgk_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
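//Coarse-to-fine rescaling for the MRT model. The second-order (MRT) equilibria
//are rebuilt, the coarse-grid omega is recomputed from RE/UMAX/OBSTR1, omega2
//is the corresponding rate for a 2x finer grid (tau_f = 2*tau_c - 0.5), and the
//passed-in SF is overwritten with the non-equilibrium scale factor
// SF = omega*(1-omega2)/((1-omega)*omega2).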
inline __device__ void mrt_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18) -19.f*(u*u+v*v+w*w);
//float m2 = 12.f*f0+-4.f*f1+-4.f*f2+-4.f*f3+-4.f*f4+f5+f6+f7+f8+-4.f*f9+f10+f11+f12+f13+-4.f*f14+f15+f16+f17+f18 +7.53968254f*(u*u+v*v+w*w);
//float m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
//float m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
//float m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
//float m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
//float m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
//float m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
//float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
//float m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
//float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
//float m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
//float m13 = f5+-f6+ f7+-f8 -u*v;
//float m14 = f11 +- f13 + - f16 + f18 -v*w;
//float m15 = f10 + - f12 +-f15 + f17 -u*w;
//float m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
//float m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
//float m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);
float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//omega = 1.0f/(3.0f*(nu0+Cs*Smag*sqrt(2.f))+0.5f);
//omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
//omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR);
//omega = 1.0f/(1.0f/omega +3.f*CS*Smag);
//omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR);
//omega = 1.0f/(1.0f/omega +3.f*CS*Smag);
//omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*sqrt(2.f)*LRFACTOR);
//float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
//float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
//float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
//float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//float tau0c = 1.f/omega;
//float tau = tau0c+0.5*(-tau0c+sqrt(tau0c*tau0c+18.f*CS*Q));//tau_total of coarse mesh
//omega = 1.f/tau;//total omega on coarse mesh
//tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q));
//omega2= 1.f/tau;
SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2);//for post-collision
//SF = omega*0.5f/omega2;//for post-streaming, pre-collision?
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
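//Fine-to-coarse rescaling (per the _fc suffix) used with the LES model: both
//relaxation rates are passed in and only the non-equilibrium part of each
//distribution is rescaled by SF = omega*(1-omega2)/((1-omega)*omega2).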
inline __device__ void mrt_scale_fc_LES(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega, float omega2)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
//float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
//float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
//float m13 = f5+-f6+ f7+-f8 -u*v;
//float m14 = f11 +- f13 + - f16 + f18 -v*w;
//float m15 = f10 + - f12 +-f15 + f17 -u*w;
//float PI11 = -0.026315789f*m1-0.5f *omega*m9;
//float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
//float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
//float PI12 = -1.5f*omega*m13;
//float PI23 = -1.5f*omega*m14;
//float PI13 = -1.5f*omega*m15;
////we know Smag on fine mesh. Smag_c=Smag_f*sqrt(2)
//float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
////omega = 1.0f/(3.0f*(nu0+CS*Smag*sqrt(2.f))+0.5f);
////omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*LRFACTOR);
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f));
////omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*LRFACTOR);
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f));
//float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
//float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
//float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
//float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//float tau0f = 1.f/omega2;
//float tau0c = 1.f/omega;
//float tau = tau0f+0.5*(-tau0f+sqrt(tau0f*tau0f+18.f*CS*sqrt(2.f)*Q));//tau_total of fine
//omega2 = 1.f/tau;//total omega on fine mesh
//tau = LRLEVEL*(tau-tau0f)+tau0c;
//omega= 1.f/tau;
//tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*Q));
float SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2);
//float SF = omega2*2.f/omega;
//float SF = ((1.0f-omega)*omega2/LRFACTOR)/(omega*(1.0f-omega2));
//SF = omega*2.f/omega2;
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
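//Index clamping helpers: dmin/dmax clamp an index to [0,b-1] (non-periodic),
//while the _p variants wrap around for periodic directions.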
__device__ int dmin(int a, int b)
{
if (a<b) return a;
else return b-1;
}
__device__ int dmax(int a)
{
if (a>-1) return a;
else return 0;
}
__device__ int dmin_p(int a, int b)
{
if (a<b) return a;
else return 0;
}
__device__ int dmax_p(int a, int b)
{
if (a>-1) return a;
else return b-1;
}
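//Linear index of direction f_num at node (x,y,z) in the pitched per-GPU
//interior array (19 blocks of pitch*YDIM*zInner elements). The index is
//clamped so halo reads just outside the slab cannot run off the allocation.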
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner);
index = dmax(index);
index = dmin(index,19*pitch*YDIM*(zInner));
// if(index<0) index = 0;
// else if(index>19*pitch*YDIM*ZDIM/GPU_N-2) index = 19*pitch*(YDIM*ZDIM/GPU_N-2);
return index;
}
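//Same as f_mem, but for a single-layer (top/bottom) ghost buffer with
//pitch*YDIM elements per direction.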
inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch)
{
int index = (x+y*pitch)+f_num*pitch*YDIM;
index = dmax(index);
index = dmin(index,19*pitch*YDIM);
// if(index<0) index = 0;
// else if(index>19*pitch*YDIM) index = 19*pitch*YDIM;
return index;
}
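//One pull-stream + collide step for the interior z-layers of this GPU's slab.
//The bottom layer (z==0) pulls the upward-moving populations (f9-f13) from the
//ghost buffer g, and the branch taken when z==zInner pulls the downward-moving
//ones (f14-f18) from h. Solid nodes (im==1 or 10) are bounced back; im==100/200
//apply the north-outlet / south-inlet extrapolations before collision. Results
//are written to fB.
//A minimal host-side launch sketch (an assumption -- the actual grid/block
//setup lives in the host code, not shown here):
// dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
// dim3 grid(XDIM/BLOCKSIZEX, YDIM/BLOCKSIZEY, zInner/BLOCKSIZEZ);
// hipLaunchKernelGGL(update_inner, grid, threads, 0, 0, fA, fB, g, h, omega, pitch, GPU, zInner);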
__global__ void update_inner(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*(zInner+2)+1+z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// if(REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1
// && y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)){
// }
// else{
f0 = fA[j];
f1 = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner){//top nodes need info from h
f9 = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_mem(14,x ,y ,pitch)];
f15= h [buff_mem(15,x-1,y ,pitch)];
f16= h [buff_mem(16,x ,y-1,pitch)];
f17= h [buff_mem(17,x+1,y ,pitch)];
f18= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_mem(9 ,x ,y ,pitch)];
f10= g [buff_mem(10,x-1,y ,pitch)];
f11= g [buff_mem(11,x ,y-1,pitch)];
f12= g [buff_mem(12,x+1,y ,pitch)];
f13= g [buff_mem(13,x ,y+1,pitch)];
f14= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_mem(9 ,x ,y ,z,pitch,zInner)];
f10= fA[f_mem(10,x-1,y ,z,pitch,zInner)];
f11= fA[f_mem(11,x ,y-1,z,pitch,zInner)];
f12= fA[f_mem(12,x+1,y ,z,pitch,zInner)];
f13= fA[f_mem(13,x ,y+1,z,pitch,zInner)];
f14= fA[f_mem(14,x ,y ,z,pitch,zInner)];
f15= fA[f_mem(15,x-1,y ,z,pitch,zInner)];
f16= fA[f_mem(16,x ,y-1,z,pitch,zInner)];
f17= fA[f_mem(17,x+1,y ,z,pitch,zInner)];
f18= fA[f_mem(18,x ,y+1,z,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_mem(10,x,y,z,pitch,zInner)] = f17;
fB[f_mem(11,x,y,z,pitch,zInner)] = f18;
fB[f_mem(12,x,y,z,pitch,zInner)] = f15;
fB[f_mem(13,x,y,z,pitch,zInner)] = f16;
fB[f_mem(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f12;
fB[f_mem(16,x,y,z,pitch,zInner)] = f13;
fB[f_mem(17,x,y,z,pitch,zInner)] = f10;
fB[f_mem(18,x,y,z,pitch,zInner)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = fA[f_mem(0 ,x,y-1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y-1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y-1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y-1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y-1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y-1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y-1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y-1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y-1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y-1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y-1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y-1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y-1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y-1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y-1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y-1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y-1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y-1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y-1,z,pitch,zInner)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = fA[f_mem(0 ,x,y+1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y+1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y+1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y+1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y+1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y+1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y+1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y+1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y+1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y+1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y+1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y+1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y+1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y+1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y+1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y+1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y+1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y+1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y+1,z,pitch,zInner)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(10,x,y,z,pitch,zInner)] = f10;
fB[f_mem(11,x,y,z,pitch,zInner)] = f11;
fB[f_mem(12,x,y,z,pitch,zInner)] = f12;
fB[f_mem(13,x,y,z,pitch,zInner)] = f13;
fB[f_mem(14,x,y,z,pitch,zInner)] = f14;
fB[f_mem(15,x,y,z,pitch,zInner)] = f15;
fB[f_mem(16,x,y,z,pitch,zInner)] = f16;
fB[f_mem(17,x,y,z,pitch,zInner)] = f17;
fB[f_mem(18,x,y,z,pitch,zInner)] = f18;
}
// }
}
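//Same step for the bottom ghost layer of the slab (stored in gA/gB): the
//upward-moving populations come from temp (the halo received from the GPU
//below) and the downward-moving ones from interior layer z=0 of f.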
__global__ void update_bottom(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (zInner+2);
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = gA [j];
f1 = gA [buff_mem(1 ,x-1,y ,pitch)];
f3 = gA [buff_mem(3 ,x+1,y ,pitch)];
f2 = gA [buff_mem(2 ,x ,y-1,pitch)];
f5 = gA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = gA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = gA [buff_mem(4 ,x ,y+1,pitch)];
f7 = gA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = gA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = temp[buff_mem(9 ,x ,y ,pitch)];
f10= temp[buff_mem(10,x-1,y ,pitch)];
f11= temp[buff_mem(11,x ,y-1,pitch)];
f12= temp[buff_mem(12,x+1,y ,pitch)];
f13= temp[buff_mem(13,x ,y+1,pitch)];
f14= f [f_mem (14,x ,y ,0,pitch, zInner)];
f15= f [f_mem (15,x-1,y ,0,pitch, zInner)];
f16= f [f_mem (16,x ,y-1,0,pitch, zInner)];
f17= f [f_mem (17,x+1,y ,0,pitch, zInner)];
f18= f [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f3 ;
gB[buff_mem(2 ,x,y,pitch)] = f4 ;
gB[buff_mem(3 ,x,y,pitch)] = f1 ;
gB[buff_mem(4 ,x,y,pitch)] = f2 ;
gB[buff_mem(5 ,x,y,pitch)] = f7 ;
gB[buff_mem(6 ,x,y,pitch)] = f8 ;
gB[buff_mem(7 ,x,y,pitch)] = f5 ;
gB[buff_mem(8 ,x,y,pitch)] = f6 ;
gB[buff_mem(9 ,x,y,pitch)] = f14;
gB[buff_mem(10,x,y,pitch)] = f17;
gB[buff_mem(11,x,y,pitch)] = f18;
gB[buff_mem(12,x,y,pitch)] = f15;
gB[buff_mem(13,x,y,pitch)] = f16;
gB[buff_mem(14,x,y,pitch)] = f9 ;
gB[buff_mem(15,x,y,pitch)] = f12;
gB[buff_mem(16,x,y,pitch)] = f13;
gB[buff_mem(17,x,y,pitch)] = f10;
gB[buff_mem(18,x,y,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = gA[buff_mem(0 ,x,y-1,pitch)];
f1 = gA[buff_mem(1 ,x,y-1,pitch)];
f3 = gA[buff_mem(3 ,x,y-1,pitch)];
f2 = gA[buff_mem(2 ,x,y-1,pitch)];
f5 = gA[buff_mem(5 ,x,y-1,pitch)];
f6 = gA[buff_mem(6 ,x,y-1,pitch)];
f4 = gA[buff_mem(4 ,x,y-1,pitch)];
f7 = gA[buff_mem(7 ,x,y-1,pitch)];
f8 = gA[buff_mem(8 ,x,y-1,pitch)];
f9 = gA[buff_mem(9 ,x,y-1,pitch)];
f10= gA[buff_mem(10,x,y-1,pitch)];
f11= gA[buff_mem(11,x,y-1,pitch)];
f12= gA[buff_mem(12,x,y-1,pitch)];
f13= gA[buff_mem(13,x,y-1,pitch)];
f14= gA[buff_mem(14,x,y-1,pitch)];
f15= gA[buff_mem(15,x,y-1,pitch)];
f16= gA[buff_mem(16,x,y-1,pitch)];
f17= gA[buff_mem(17,x,y-1,pitch)];
f18= gA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = gA[buff_mem(0 ,x,y+1,pitch)];
f1 = gA[buff_mem(1 ,x,y+1,pitch)];
f3 = gA[buff_mem(3 ,x,y+1,pitch)];
f2 = gA[buff_mem(2 ,x,y+1,pitch)];
f5 = gA[buff_mem(5 ,x,y+1,pitch)];
f6 = gA[buff_mem(6 ,x,y+1,pitch)];
f4 = gA[buff_mem(4 ,x,y+1,pitch)];
f7 = gA[buff_mem(7 ,x,y+1,pitch)];
f8 = gA[buff_mem(8 ,x,y+1,pitch)];
f9 = gA[buff_mem(9 ,x,y+1,pitch)];
f10= gA[buff_mem(10,x,y+1,pitch)];
f11= gA[buff_mem(11,x,y+1,pitch)];
f12= gA[buff_mem(12,x,y+1,pitch)];
f13= gA[buff_mem(13,x,y+1,pitch)];
f14= gA[buff_mem(14,x,y+1,pitch)];
f15= gA[buff_mem(15,x,y+1,pitch)];
f16= gA[buff_mem(16,x,y+1,pitch)];
f17= gA[buff_mem(17,x,y+1,pitch)];
f18= gA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f1 ;
gB[buff_mem(2 ,x,y,pitch)] = f2 ;
gB[buff_mem(3 ,x,y,pitch)] = f3 ;
gB[buff_mem(4 ,x,y,pitch)] = f4 ;
gB[buff_mem(5 ,x,y,pitch)] = f5 ;
gB[buff_mem(6 ,x,y,pitch)] = f6 ;
gB[buff_mem(7 ,x,y,pitch)] = f7 ;
gB[buff_mem(8 ,x,y,pitch)] = f8 ;
gB[buff_mem(9 ,x,y,pitch)] = f9 ;
gB[buff_mem(10,x,y,pitch)] = f10;
gB[buff_mem(11,x,y,pitch)] = f11;
gB[buff_mem(12,x,y,pitch)] = f12;
gB[buff_mem(13,x,y,pitch)] = f13;
gB[buff_mem(14,x,y,pitch)] = f14;
gB[buff_mem(15,x,y,pitch)] = f15;
gB[buff_mem(16,x,y,pitch)] = f16;
gB[buff_mem(17,x,y,pitch)] = f17;
gB[buff_mem(18,x,y,pitch)] = f18;
}
// }
}
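//Same step for the top ghost layer (hA/hB): the upward-moving populations come
//from interior layer zInner-1 of f, and the downward-moving ones from temp
//(the halo received from the GPU above).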
__global__ void update_top(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (GPU+1)*(zInner+2)-1;//physical coord
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = hA[j];
f1 = hA [buff_mem(1 ,x-1,y ,pitch)];
f3 = hA [buff_mem(3 ,x+1,y ,pitch)];
f2 = hA [buff_mem(2 ,x ,y-1,pitch)];
f5 = hA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = hA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = hA [buff_mem(4 ,x ,y+1,pitch)];
f7 = hA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = hA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = f [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_mem(14,x ,y ,pitch)];
f15= temp[buff_mem(15,x-1,y ,pitch)];
f16= temp[buff_mem(16,x ,y-1,pitch)];
f17= temp[buff_mem(17,x+1,y ,pitch)];
f18= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f3 ;
hB[buff_mem(2 ,x,y,pitch)] = f4 ;
hB[buff_mem(3 ,x,y,pitch)] = f1 ;
hB[buff_mem(4 ,x,y,pitch)] = f2 ;
hB[buff_mem(5 ,x,y,pitch)] = f7 ;
hB[buff_mem(6 ,x,y,pitch)] = f8 ;
hB[buff_mem(7 ,x,y,pitch)] = f5 ;
hB[buff_mem(8 ,x,y,pitch)] = f6 ;
hB[buff_mem(9 ,x,y,pitch)] = f14;
hB[buff_mem(10,x,y,pitch)] = f17;
hB[buff_mem(11,x,y,pitch)] = f18;
hB[buff_mem(12,x,y,pitch)] = f15;
hB[buff_mem(13,x,y,pitch)] = f16;
hB[buff_mem(14,x,y,pitch)] = f9 ;
hB[buff_mem(15,x,y,pitch)] = f12;
hB[buff_mem(16,x,y,pitch)] = f13;
hB[buff_mem(17,x,y,pitch)] = f10;
hB[buff_mem(18,x,y,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = hA[buff_mem(0 ,x,y-1,pitch)];
f1 = hA[buff_mem(1 ,x,y-1,pitch)];
f3 = hA[buff_mem(3 ,x,y-1,pitch)];
f2 = hA[buff_mem(2 ,x,y-1,pitch)];
f5 = hA[buff_mem(5 ,x,y-1,pitch)];
f6 = hA[buff_mem(6 ,x,y-1,pitch)];
f4 = hA[buff_mem(4 ,x,y-1,pitch)];
f7 = hA[buff_mem(7 ,x,y-1,pitch)];
f8 = hA[buff_mem(8 ,x,y-1,pitch)];
f9 = hA[buff_mem(9 ,x,y-1,pitch)];
f10= hA[buff_mem(10,x,y-1,pitch)];
f11= hA[buff_mem(11,x,y-1,pitch)];
f12= hA[buff_mem(12,x,y-1,pitch)];
f13= hA[buff_mem(13,x,y-1,pitch)];
f14= hA[buff_mem(14,x,y-1,pitch)];
f15= hA[buff_mem(15,x,y-1,pitch)];
f16= hA[buff_mem(16,x,y-1,pitch)];
f17= hA[buff_mem(17,x,y-1,pitch)];
f18= hA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = hA[buff_mem(0 ,x,y+1,pitch)];
f1 = hA[buff_mem(1 ,x,y+1,pitch)];
f3 = hA[buff_mem(3 ,x,y+1,pitch)];
f2 = hA[buff_mem(2 ,x,y+1,pitch)];
f5 = hA[buff_mem(5 ,x,y+1,pitch)];
f6 = hA[buff_mem(6 ,x,y+1,pitch)];
f4 = hA[buff_mem(4 ,x,y+1,pitch)];
f7 = hA[buff_mem(7 ,x,y+1,pitch)];
f8 = hA[buff_mem(8 ,x,y+1,pitch)];
f9 = hA[buff_mem(9 ,x,y+1,pitch)];
f10= hA[buff_mem(10,x,y+1,pitch)];
f11= hA[buff_mem(11,x,y+1,pitch)];
f12= hA[buff_mem(12,x,y+1,pitch)];
f13= hA[buff_mem(13,x,y+1,pitch)];
f14= hA[buff_mem(14,x,y+1,pitch)];
f15= hA[buff_mem(15,x,y+1,pitch)];
f16= hA[buff_mem(16,x,y+1,pitch)];
f17= hA[buff_mem(17,x,y+1,pitch)];
f18= hA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f1 ;
hB[buff_mem(2 ,x,y,pitch)] = f2 ;
hB[buff_mem(3 ,x,y,pitch)] = f3 ;
hB[buff_mem(4 ,x,y,pitch)] = f4 ;
hB[buff_mem(5 ,x,y,pitch)] = f5 ;
hB[buff_mem(6 ,x,y,pitch)] = f6 ;
hB[buff_mem(7 ,x,y,pitch)] = f7 ;
hB[buff_mem(8 ,x,y,pitch)] = f8 ;
hB[buff_mem(9 ,x,y,pitch)] = f9 ;
hB[buff_mem(10,x,y,pitch)] = f10;
hB[buff_mem(11,x,y,pitch)] = f11;
hB[buff_mem(12,x,y,pitch)] = f12;
hB[buff_mem(13,x,y,pitch)] = f13;
hB[buff_mem(14,x,y,pitch)] = f14;
hB[buff_mem(15,x,y,pitch)] = f15;
hB[buff_mem(16,x,y,pitch)] = f16;
hB[buff_mem(17,x,y,pitch)] = f17;
hB[buff_mem(18,x,y,pitch)] = f18;
}
// }
}
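//Inline-PTX global load with the .cg qualifier (cache at the global/L2 level,
//bypassing L1).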
__device__ __inline__ float ld_gb1_cg(const float *addr)
{
float return_value;
asm("ld.global.cg.f32 %0, [%1];" : "=f"(return_value) : "l"(addr));
return return_value;
}
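//Fill the pitched distribution array with the equilibrium for rho=1, u=0.05,
//v=UMAX, w=0 (plain BGK equilibria or the second-order MRT form, depending on
//MODEL), and zero the padding columns between XDIM and the pitch.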
__global__ void initialize_single(float *f, size_t pitch, int GPU_N)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,z);
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.05f;
v = UMAX;
w = 0.0f;
// if(im == 10 || im == 1){
// u = 0.0f;
// v = 0.0f;
// w = 0.0f;
// }
//if(x == 3 ) u = 0.1f;
usqr = u*u+v*v+w*w;
if(MODEL == "BGK"){
f[j+0 *pitch*YDIM*ZDIM]= 1.0f/3.0f*(rho-1.5f*usqr);
f[j+1 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+2 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+3 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f[j+4 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f[j+5 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f[j+6 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f[j+7 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f[j+8 *pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f[j+9 *pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+10*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f[j+11*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f[j+12*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f[j+13*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f[j+14*pitch*YDIM*ZDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f[j+15*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f[j+16*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f[j+17*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f[j+18*pitch*YDIM*ZDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
else{
float f0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float f1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float f2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float f3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float f4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float f5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float f6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float f7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float f8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float f9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float f10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float f11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float f12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float f13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float f14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float f15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float f16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float f17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float f18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
f1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
f14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
f17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f[j+0 *pitch*YDIM*(ZDIM/GPU_N-2)]=f0 ;
f[j+1 *pitch*YDIM*(ZDIM/GPU_N-2)]=f1 ;
f[j+2 *pitch*YDIM*(ZDIM/GPU_N-2)]=f2 ;
f[j+3 *pitch*YDIM*(ZDIM/GPU_N-2)]=f3 ;
f[j+4 *pitch*YDIM*(ZDIM/GPU_N-2)]=f4 ;
f[j+5 *pitch*YDIM*(ZDIM/GPU_N-2)]=f5 ;
f[j+6 *pitch*YDIM*(ZDIM/GPU_N-2)]=f6 ;
f[j+7 *pitch*YDIM*(ZDIM/GPU_N-2)]=f7 ;
f[j+8 *pitch*YDIM*(ZDIM/GPU_N-2)]=f8 ;
f[j+9 *pitch*YDIM*(ZDIM/GPU_N-2)]=f9 ;
f[j+10*pitch*YDIM*(ZDIM/GPU_N-2)]=f10;
f[j+11*pitch*YDIM*(ZDIM/GPU_N-2)]=f11;
f[j+12*pitch*YDIM*(ZDIM/GPU_N-2)]=f12;
f[j+13*pitch*YDIM*(ZDIM/GPU_N-2)]=f13;
f[j+14*pitch*YDIM*(ZDIM/GPU_N-2)]=f14;
f[j+15*pitch*YDIM*(ZDIM/GPU_N-2)]=f15;
f[j+16*pitch*YDIM*(ZDIM/GPU_N-2)]=f16;
f[j+17*pitch*YDIM*(ZDIM/GPU_N-2)]=f17;
f[j+18*pitch*YDIM*(ZDIM/GPU_N-2)]=f18;
}
if(x == XDIM-1){
for(int i = XDIM; i<pitch; i++){
j = i+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
      //zero the x-padding columns; use the same per-direction stride,
      //pitch*YDIM*(ZDIM/GPU_N-2), as the interior writes above
      f[j+0 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+1 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+2 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+3 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+4 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+5 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+6 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+7 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+8 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+9 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+10*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+11*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+12*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+13*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+14*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+15*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+16*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+17*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
      f[j+18*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
}
}
}
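//The buffer kernel below fills one XDIM x YDIM slice per direction. Judging
//from how main() allocates and launches it, g_* holds the bottom halo slice
//and h_* the top halo slice of each GPU's sub-domain, so it mirrors
//initialize_single but with a 2D index and a per-direction stride of
//pitch*YDIM instead of pitch*YDIM*(ZDIM/GPU_N-2).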
__global__ void initialize_buffer(float *g, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.05f;
v = UMAX;
w = 0.0f;
usqr = u*u+v*v+w*w;
if(MODEL == "BGK"){
g[j+0 *pitch*YDIM]= 1.0f/3.0f*(rho-1.5f*usqr);
g[j+1 *pitch*YDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
g[j+2 *pitch*YDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
g[j+3 *pitch*YDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
g[j+4 *pitch*YDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
g[j+5 *pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
g[j+6 *pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
g[j+7 *pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
g[j+8 *pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
g[j+9 *pitch*YDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
g[j+10*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
g[j+11*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
g[j+12*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
g[j+13*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
g[j+14*pitch*YDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
g[j+15*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
g[j+16*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
g[j+17*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
g[j+18*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
else{
float f0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float f1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float f2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float f3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float f4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float f5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float f6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float f7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float f8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float f9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float f10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float f11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float f12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float f13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float f14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float f15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float f16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float f17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float f18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
f1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
f14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
f17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
g[j+0 *pitch*YDIM]=f0 ;
g[j+1 *pitch*YDIM]=f1 ;
g[j+2 *pitch*YDIM]=f2 ;
g[j+3 *pitch*YDIM]=f3 ;
g[j+4 *pitch*YDIM]=f4 ;
g[j+5 *pitch*YDIM]=f5 ;
g[j+6 *pitch*YDIM]=f6 ;
g[j+7 *pitch*YDIM]=f7 ;
g[j+8 *pitch*YDIM]=f8 ;
g[j+9 *pitch*YDIM]=f9 ;
g[j+10*pitch*YDIM]=f10;
g[j+11*pitch*YDIM]=f11;
g[j+12*pitch*YDIM]=f12;
g[j+13*pitch*YDIM]=f13;
g[j+14*pitch*YDIM]=f14;
g[j+15*pitch*YDIM]=f15;
g[j+16*pitch*YDIM]=f16;
g[j+17*pitch*YDIM]=f17;
g[j+18*pitch*YDIM]=f18;
}
}
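//For reference, the constants hard-coded in the BGK branch above are the
//standard D3Q19 equilibrium written out per direction,
//  feq_i = w_i * ( rho + 3*(e_i.u) + 4.5*(e_i.u)^2 - 1.5*|u|^2 ),
//with weights 1/3 for the rest direction (f0), 1/18 for the six axis
//directions (f1-f4, f9, f14) and 1/36 for the twelve diagonals. The else
//branch (taken when MODEL is not "BGK", i.e. the MRT path) looks like a
//moment-style re-expansion of the same equilibrium: an isotropic part first,
//with the second-order velocity terms added back in the f* += lines.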
//zMin = minimum zcoord, zNum = number of nodes in z
void WriteResults(float *f, ofstream &output, float omega, int zMin, int zNum)
{
for(int k = 0; k<zNum; k++){
for(int i = 0; i<YDIM; i++){
for(int j = 0; j<XDIM; j++){
int index = j+i*XDIM+k*XDIM*YDIM;//node (j,i,k) in the x-major host layout
float f0 = f[index+XDIM*YDIM*zNum*0 ];
float f1 = f[index+XDIM*YDIM*zNum*1 ];
float f2 = f[index+XDIM*YDIM*zNum*2 ];
float f3 = f[index+XDIM*YDIM*zNum*3 ];
float f4 = f[index+XDIM*YDIM*zNum*4 ];
float f5 = f[index+XDIM*YDIM*zNum*5 ];
float f6 = f[index+XDIM*YDIM*zNum*6 ];
float f7 = f[index+XDIM*YDIM*zNum*7 ];
float f8 = f[index+XDIM*YDIM*zNum*8 ];
float f9 = f[index+XDIM*YDIM*zNum*9 ];
float f10= f[index+XDIM*YDIM*zNum*10];
float f11= f[index+XDIM*YDIM*zNum*11];
float f12= f[index+XDIM*YDIM*zNum*12];
float f13= f[index+XDIM*YDIM*zNum*13];
float f14= f[index+XDIM*YDIM*zNum*14];
float f15= f[index+XDIM*YDIM*zNum*15];
float f16= f[index+XDIM*YDIM*zNum*16];
float f17= f[index+XDIM*YDIM*zNum*17];
float f18= f[index+XDIM*YDIM*zNum*18];
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
float u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
float v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
float w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<j<<", "<<i<<", "<<zMin+k<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
//<<uAv_h[i]<<","<<vAv_h[i]<<", "<<ufluc_h[i]<<","<<vfluc_h[i]<<endl;
<<f0<<","<<f1<<", "<<f9<<","<<f18<<endl;
}}}
}
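//None of the runtime calls in main() below (hipMalloc, hipMemcpy2D,
//hipMemcpyPeerAsync, kernel launches) have their return codes checked. A
//minimal checking macro, sketched here against the HIP runtime API and not
//used by the original code, could look like this:
#define HIP_CHECK(cmd) do{ \
 hipError_t hip_err_ = (cmd); \
 if(hip_err_ != hipSuccess){ \
 std::cout<<"HIP error: "<<hipGetErrorString(hip_err_)<<" at line "<<__LINE__<<std::endl; \
 } }while(0)
//Example (hypothetical) use: HIP_CHECK(hipMalloc((void **)&ptr,bytes));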
int main(int argc, char *argv[])
{
int GPU_N;
hipGetDeviceCount(&GPU_N);
GPU_N = 1; //override: force a single-GPU run regardless of the detected device count
cout<<"number of GPUs: "<<GPU_N<<endl;
//int *image_d, *image_h;
ofstream output;
ofstream output2;
string FileName = CASENAME;
//output.open ("LBM1_out.dat");
output.open ((FileName+".dat").c_str());
output2.open ((FileName+".force").c_str());
//size_t memsize, memsize2;
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch = pitch*sizeof(float);
size_t pitch_elements = pitch/sizeof(float);
cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl;
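  //Note: this pitch is a manually chosen power-of-two row stride in bytes
  //(not one returned by hipMallocPitch); pitch_elements is the element
  //stride every kernel uses for its row indexing.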
int i, nBlocks;
float omega, CharLength;
CharLength = OBSTR1*2.f;
omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
cout<<"omega : "<<omega<<endl;
cout<<"blocksize: "<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
cout<<"grid: "<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
cout<<"TMAX: "<<TMAX<<endl;
cout<<"Method: "<<METHOD<<endl;
cout<<"Model: "<<MODEL<<endl;
int zInner = ZDIM/GPU_N-2; //excluding halo
//int zGPU = ZDIM/GPU_N;//z nodes per GPU (including halo)
//nBlocks does not include the halo layers
nBlocks = ((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX)*((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY)
*((zInner+BLOCKSIZEZ-1)/BLOCKSIZEZ);
cout<<"nBlocks:"<<nBlocks<<endl;
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//2 halo layers per GPU (for 2 GPUs)
dim3 grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
dim3 h_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
hipStream_t stream_halo[GPU_N];
hipStream_t stream_inner[GPU_N];
//data pointers as 3D array (GPUxCoord)
float *f_inner_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N];
float *f_inner_A_d[GPU_N], *g_A_d[GPU_N], *h_A_d[GPU_N];
float *f_inner_B_d[GPU_N], *g_B_d[GPU_N], *h_B_d[GPU_N];
float *g_temp[GPU_N], *h_temp[GPU_N];
//Malloc and Initialize for each GPU
for(int n = 0; n<GPU_N; n++){
f_inner_h[n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float));
g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
hipSetDevice(n);
hipStreamCreate(&stream_halo[n]);
hipStreamCreate(&stream_inner[n]);
for(int m = 0; m<GPU_N; m++){
if(m != n)
hipDeviceEnablePeerAccess(m,0);
}
hipMalloc((void **) &f_inner_A_d[n], pitch*YDIM*zInner*19*sizeof(float));
hipMalloc((void **) &f_inner_B_d[n], pitch*YDIM*zInner*19*sizeof(float));
hipMalloc((void **) & g_A_d[n], pitch*YDIM* 19*sizeof(float));
hipMalloc((void **) & g_B_d[n], pitch*YDIM* 19*sizeof(float));
hipMalloc((void **) & h_A_d[n], pitch*YDIM* 19*sizeof(float));
hipMalloc((void **) & h_B_d[n], pitch*YDIM* 19*sizeof(float));
hipMalloc((void **) & g_temp[n], pitch*YDIM* 19*sizeof(float));
hipMalloc((void **) & h_temp[n], pitch*YDIM* 19*sizeof(float));
//initialize host f_inner
for (i = 0; i < XDIM*YDIM*zInner*19; i++)
f_inner_h[n][i] = 0;
//initialize host g,h
for (i = 0; i < XDIM*YDIM*19; i++){
g_h[n][i] = 0;
h_h[n][i] = 0;
}
hipMemcpy2D(f_inner_A_d[n],pitch,f_inner_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyHostToDevice);
hipMemcpy2D(f_inner_B_d[n],pitch,f_inner_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyHostToDevice);
hipMemcpy2D( g_A_d[n],pitch, g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( g_B_d[n],pitch, g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( h_A_d[n],pitch, h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,hipMemcpyHostToDevice);
hipMemcpy2D( h_B_d[n],pitch, h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( initialize_single), dim3(grid) , dim3(threads), 0, 0, f_inner_A_d[n],pitch_elements,GPU_N);
hipLaunchKernelGGL(( initialize_single), dim3(grid) , dim3(threads), 0, 0, f_inner_B_d[n],pitch_elements,GPU_N);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, g_A_d[n],pitch_elements);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, g_B_d[n],pitch_elements);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, h_A_d[n],pitch_elements);
hipLaunchKernelGGL(( initialize_buffer), dim3(g_grid), dim3(threads), 0, 0, h_B_d[n],pitch_elements);
}//end Malloc and Initialize
struct timeval tdr0,tdr1;
double restime;
hipDeviceSynchronize();
gettimeofday (&tdr0,NULL);
//Time loop
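  //Each pass advances two LBM steps with ping-pong buffers (A->B, then B->A).
  //Before each step the five +z-moving populations (f9-f13) are pulled from
  //the lower neighbour's top halo (h_*_d) into g_temp, and the five
  //-z-moving populations (f14-f18) from the upper neighbour's bottom halo
  //(g_*_d) into h_temp, via hipMemcpyPeerAsync on stream_halo; the interior
  //update is issued on stream_inner so it can overlap with the halo work.
  //With GPU_N forced to 1 above, both peer copies read from the same device,
  //which makes the z direction effectively periodic on a single GPU.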
for(int t = 0; t<TMAX; t+=2){
//A->B
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipMemcpyPeerAsync(&h_temp[n][pitch_elements*YDIM*14],n,&g_A_d[ (n+1)%GPU_N][pitch_elements*YDIM*14], (n+1)%GPU_N,pitch_elements*YDIM*sizeof(float)*5,stream_halo[n]);
hipMemcpyPeerAsync(&g_temp[n][pitch_elements*YDIM*9 ],n,&h_A_d[abs(n-1)%GPU_N][pitch_elements*YDIM*9 ],abs(n-1)%GPU_N,pitch_elements*YDIM*sizeof(float)*5,stream_halo[n]);
hipStreamSynchronize(stream_halo[n]);
hipLaunchKernelGGL(( update_inner) , dim3(grid), dim3(threads), 0, stream_inner[n], f_inner_A_d[n],f_inner_B_d[n], g_A_d[n], h_A_d[n],omega,pitch_elements,n,zInner);
hipLaunchKernelGGL(( update_top) , dim3(h_grid), dim3(threads), 0, stream_halo [n], h_A_d[n], h_B_d[n],f_inner_A_d[n],h_temp[n],omega,pitch_elements,n,zInner);
hipLaunchKernelGGL(( update_bottom), dim3(h_grid), dim3(threads), 0, stream_halo [n], g_A_d[n], g_B_d[n],f_inner_A_d[n],g_temp[n],omega,pitch_elements,n,zInner);
}
hipDeviceSynchronize();
//B->A
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipMemcpyPeerAsync(&h_temp[n][pitch_elements*YDIM*14],n,&g_B_d[ (n+1)%GPU_N][pitch_elements*YDIM*14], (n+1)%GPU_N,pitch_elements*YDIM*sizeof(float)*5,stream_halo[n]);
hipMemcpyPeerAsync(&g_temp[n][pitch_elements*YDIM*9 ],n,&h_B_d[abs(n-1)%GPU_N][pitch_elements*YDIM*9 ],abs(n-1)%GPU_N,pitch_elements*YDIM*sizeof(float)*5,stream_halo[n]);
hipStreamSynchronize(stream_halo[n]);
hipLaunchKernelGGL(( update_inner) , dim3(grid), dim3(threads), 0, stream_inner[n], f_inner_B_d[n],f_inner_A_d[n], g_B_d[n], h_B_d[n],omega,pitch_elements,n,zInner);
hipLaunchKernelGGL(( update_top) , dim3(h_grid), dim3(threads), 0, stream_halo [n], h_B_d[n], h_A_d[n],f_inner_B_d[n],h_temp[n],omega,pitch_elements,n,zInner);
hipLaunchKernelGGL(( update_bottom), dim3(h_grid), dim3(threads), 0, stream_halo [n], g_B_d[n], g_A_d[n],f_inner_B_d[n],g_temp[n],omega,pitch_elements,n,zInner);
}
hipDeviceSynchronize();
}//end Time loop
hipDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
Nodes = XDIM*YDIM*ZDIM;
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)\n";
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM<<"\n";
//D2H Memcpy and write results
for(int n = 0; n<GPU_N; n++){
hipSetDevice(n);
hipMemcpy2D(f_inner_h[n],XDIM*sizeof(float),f_inner_A_d[n],pitch,XDIM*sizeof(float),YDIM*zInner*19,hipMemcpyDeviceToHost);
hipMemcpy2D( g_h[n],XDIM*sizeof(float), g_A_d[n],pitch,XDIM*sizeof(float),YDIM* 19,hipMemcpyDeviceToHost);
hipMemcpy2D( h_h[n],XDIM*sizeof(float), h_A_d[n],pitch,XDIM*sizeof(float),YDIM* 19,hipMemcpyDeviceToHost);
//Write results
WriteResults( g_h[n],output,omega,ZDIM/GPU_N*n ,1 );
WriteResults(f_inner_h[n],output,omega,ZDIM/GPU_N*n+1 ,zInner);
WriteResults( h_h[n],output,omega,ZDIM/GPU_N*(n+1)-1,1 );
hipFree(f_inner_A_d[n]);
hipFree(f_inner_B_d[n]);
hipFree( g_A_d[n]);
hipFree( g_B_d[n]);
hipFree( h_A_d[n]);
hipFree( h_B_d[n]);
hipFree( g_temp[n]);
hipFree( h_temp[n]);
}//end write results
return(0);
}
| 13cbb321e2adbf5addb0e0eed127177e590afaaf.cu | #include <cuda.h>
//#include <cutil.h>
#include <iostream>
#include <ostream>
#include <fstream>
//#include "/home/yusuke/NVIDIA_GPU_Computing_SDK/C/common/inc/cutil.h"
using namespace std;
#define CASENAME "Test31"
#define BLOCKSIZEX 128
#define BLOCKSIZEY 1
#define BLOCKSIZEZ 1
#define BLOCKSIZELRX 64
#define BLOCKSIZELRY 1
#define BLOCKSIZELRZ 1
#define XDIM 128
#define YDIM 128
#define ZDIM 64
#define TMAX 100
#define STARTF 0
#define OBSTR1 4.f
#define OBSTX1 31.5f
#define OBSTY1 31.5f
#define OBSTZ1 15.5f
#define OBSTR2 4.f
#define OBSTX2 63.5f
#define OBSTY2 31.5f
#define OBSTZ2 31.5f
#define RE 100.f//2000.f//100.f;
#define UMAX 0.08f
#define METHOD "SINGLE" //SINGLE,HYB,TEXT,SHARED,CACHE
#define SmagLES "NO" //YES,NO
#define MODEL "BGK" //BGK,MRT,STREAM
#define ZPERIODIC "NO"
#define CS 0.04f
//#define CHARLENGTH = XDIM-2.f;
//#define BLOCKSIZE 16;
//int const XDIM = 32;
//int const YDIM = 32;
#include <sys/time.h>
#include <time.h>
/*
Image List:
0 fluid
1 BB
2
3 DirichletWest(simple)
10 BB(force)
13 DirichletWest_Reg
14 NeumannEast_Reg
15 DirichletNorth_Reg
16 DirichletSouth_Reg
21 ysymmetry_top
22 ysymmetry_bot
23 zsymmetry_top
24 zsymmetry_bot
25 xsymmetry_top
26 xsymmetry_bot
*/
inline __device__ int ImageFcn(float x, float y, float z){
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// return 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// return 10;
//if(((x-OBSTX)*(x-OBSTX)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1)
// {
// return 10;
// }
// else
// //if(y < 0.1f || z < 0.1f || (XDIM-x) < 0.1f || (YDIM-y) < 0.1f || (ZDIM-z) < 0.1f)
// if(y < 17.5f || z < 17.5f || y > 46.5f || z > 46.5f)
// return 1;
// else if(x < 17.5f)
// return 13;
// else if(x > 78.5f)
// return 14;
// else
if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
return 10;
else
return 0;
}
inline __device__ int ImageFcn(int x, int y, int z){
int value = 0;
//Cylinder
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1))<OBSTR1*OBSTR1)
// value = 10;
// else if(((x-OBSTX2)*(x-OBSTX2)+(y-OBSTY2)*(y-OBSTY2))<OBSTR2*OBSTR2)
// value = 10;
//Sphere
// if(((x-OBSTX1)*(x-OBSTX1)+(y-OBSTY1)*(y-OBSTY1)+(z-OBSTZ1)*(z-OBSTZ1))<OBSTR1*OBSTR1)
// {
//// if(z == 0 || z == ZDIM-1)
//// return 1;
//// else
// return 10;
// }
// if(z == 0)
// value = 0;
// else if(z == ZDIM-1)
// value = 0;
// if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
// value = 10;
// else if(y == 0)
// value = 200;//22;
// else if(y == YDIM-1)
// value = 100;
// else if(x == 0)
// value = 26;
// else if(x == XDIM-1)
// value = 25;
// else if(z == 0)
// value = 0;
// else if(z == ZDIM-1)
// value = 0;
//return value;
//Lid Driven Cavity
// if(y == 0 || y == YDIM-1 || z == 0 || z == ZDIM-1)
// value = 1;
// else if(x == XDIM-2 || y == 1 || y == YDIM-2 || z == 1 || z == ZDIM-2)
// return 1;
// else if(x == 0)
// return 1;
// if(abs(x-OBSTX1) < OBSTR1 && abs(y-OBSTY1) < OBSTR1)
// value = 10;
if(y == 0)
value = 200;//22;
else if(y == YDIM-1)
value = 100;
else if(x == 0)
value = 1;
else if(x == XDIM-1)
value = 1;
// else if(x == 0)
// return 53;
// else if(x == XDIM-1)
// return 54;
return value;
}
inline __device__ float PoisProf (float x){
float radius = (YDIM-1-1)*0.5f;
float result = -1.0f*(((1.0f-(x-0.5f)/radius))*((1.0f-(x-0.5f)/radius))-1.0f);
return (result);
// return 1.f;
}
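//PoisProf returns a normalized parabolic (Poiseuille) profile: approximately
//0 at the two walls and 1 at mid-channel; the commented-out "return 1.f"
//would give a uniform profile instead.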
__device__ void DirichletWest(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(y == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(y == YDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
// if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(zcoord)*1.5;
v = 0.0f;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float usqr = u*u+v*v+w*w;
f1 = fma(0.0555555556f,6.0f*u,f3);//0.0555555556f*(6.0f*u)+f3;//-0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);;
f5 = fma(0.0277777778f,6.0f*(u+v),f7 );// -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = fma(0.0277777778f,6.0f*(u-v),f6 );// -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f10= fma(0.0277777778f,6.0f*(u+w),f17);//-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f15= fma(0.0277777778f,6.0f*(u-w),f12);//-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
//// f0 = 1.0f/3.0f*(rho-1.5f*usqr);
// f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
//// f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
//// f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
//// f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
//// f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
//// f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
//// f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
//// f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr);
//// f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
//// f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr);
//// f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
//// f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
//// f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
//// f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
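//The fma lines above impose the west-inlet velocity: each unknown east-going
//population is rebuilt from its opposite-direction partner plus a momentum
//correction, f_i = f_opp(i) + 6*w_i*(e_i.u_wall), with u_wall = (UMAX,0,0);
//the commented block keeps the full equilibrium form that this correction
//corresponds to.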
__device__ void DirichletWest_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0){
f2 = f4;
f6 = f7;
f11 = f13;
f16 = f18;
}
else if(y == YDIM-1){
f4 = f2;
f7 = f6;
f13 = f11;
f18 = f16;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;//*PoisProf(y)*1.5;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f1 = f3+0.33333333f*u;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f5 = f7+0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f10= f17+0.166666667f*(u+w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f15= f12+0.166666667f*(u-w);
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
void __device__ DirichletWest_Regularized(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float PI11 = 0;
float PI12 = 0;
float PI22 = 0;
float PI33 = 0;
float PI13 = 0;
float PI23 = 0;
float u;//,v;//,w;//,rho;
u = UMAX;//*PoisProf(z)*1.5;
//v = 0.0f;
//w = 0.0f;
float usqr = u*u;//+v*v+w*w;
float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho -1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho -1.5f*usqr);
float feq9 = 0.0555555556f*(rho -1.5f*usqr);
float feq14 = 0.0555555556f*(rho -1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho -1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho -1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho -1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho -1.5f*usqr);
// float feq0 = 0.3333333333f*(rho-1.5f*usqr);
// float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
// float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
// float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
// float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
// float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
// float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
// float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f1 = feq1 +f3 -feq3 ;
f5 = feq5 +f7 -feq7 ;
f8 = feq8 +f6 -feq6 ;
f10= feq10+f17-feq17;
f15= feq15+f12-feq12;
PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
}
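//Reading of the regularized west-inlet condition above: (1) edge and corner
//nodes are first folded back with bounce-back/symmetry so only the x-face
//unknowns remain, (2) rho is computed from the known populations and the
//prescribed u, (3) the D3Q19 equilibria are built and the unknown populations
//reconstructed as feq_i + (f_opp - feq_opp), (4) the non-equilibrium stress
//PI_ab is assembled from all populations and every f_i is overwritten with
//feq_i plus its second-order projection of PI. Step (4) is the
//"regularization": only the hydrodynamic part of the non-equilibrium
//information is kept.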
void __device__ NeumannEast_Regularized(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
f13 = f11;
f18 = f16;
f8 = f5;
}
else if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f18 = f13;
}
}
float PI11 = 0;
float PI12 = 0;
float PI22 = 0;
float PI33 = 0;
float PI13 = 0;
float PI23 = 0;
float u;//,v;//,w;//,rho;
float rho = 1.0f;
//v = 0.0f;
//w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
float usqr = u*u;//+v*v+w*w;
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho -1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho -1.5f*usqr);
float feq9 = 0.0555555556f*(rho -1.5f*usqr);
float feq14 = 0.0555555556f*(rho -1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho -1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho -1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*( u)+4.5f*( u)*( u)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho -1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u)+4.5f*(-u)*(-u)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho -1.5f*usqr);
// float feq0 = 0.3333333333f*(rho-1.5f*usqr);
// float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
// float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
// float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
// float feq5 = 0.0277777778f*(rho+3.0f*( u+v)+4.5f*( u+v)*( u+v)-1.5f*usqr);
// float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
// float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
// float feq8 = 0.0277777778f*(rho+3.0f*( u-v)+4.5f*( u-v)*( u-v)-1.5f*usqr);
// float feq10 = 0.0277777778f*(rho+3.0f*( u+w)+4.5f*( u+w)*( u+w)-1.5f*usqr);
// float feq11 = 0.0277777778f*(rho+3.0f*( v+w)+4.5f*( v+w)*( v+w)-1.5f*usqr);
// float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
// float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
// float feq15 = 0.0277777778f*(rho+3.0f*( u-w)+4.5f*( u-w)*( u-w)-1.5f*usqr);
// float feq16 = 0.0277777778f*(rho+3.0f*( v-w)+4.5f*( v-w)*( v-w)-1.5f*usqr);
// float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
// float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f3 = feq3 +f1 -feq1 ;
f7 = feq7 +f5 -feq5 ;
f6 = feq6 +f8 -feq8 ;
f17= feq17+f10-feq10;
f12= feq12+f15-feq15;
PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
f0 = feq0 +1.5f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f1 = feq1 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f2 = feq2 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f3 = feq3 +0.25f *(( 0.666666667f)*PI11 +( -0.333333333f)*PI22+( -0.333333333f)*PI33) ;
f4 = feq4 +0.25f *(( -0.333333333f)*PI11 +( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f5 = feq5 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f6 = feq6 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f7 = feq7 +0.125f*(( 0.666666667f)*PI11+2.0f*( PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f8 = feq8 +0.125f*(( 0.666666667f)*PI11+2.0f*(-PI12 )+( 0.666666667f)*PI22+( -0.333333333f)*PI33) ;
f9 = feq9 +0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f10 = feq10+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f11 = feq11+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f12 = feq12+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f13 = feq13+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f14 = feq14+0.25f *(( -0.333333333f)*PI11 +( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f15 = feq15+0.125f*(( 0.666666667f)*PI11+2.0f*( +-PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f16 = feq16+0.125f*(( -0.333333333f)*PI11+2.0f*( +-PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
f17 = feq17+0.125f*(( 0.666666667f)*PI11+2.0f*( + PI13 )+( -0.333333333f)*PI22+( 0.666666667f)*PI33) ;
f18 = feq18+0.125f*(( -0.333333333f)*PI11+2.0f*( + PI23)+( 0.666666667f)*PI22+( 0.666666667f)*PI33) ;
}
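//NeumannEast_Regularized applies the same regularization at the east face,
//but with rho fixed to 1 and the outflow velocity u recovered from the known
//populations (a density outlet rather than a velocity inlet).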
__device__ void NeumannEast(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
float u2 = u*u;
float v2 = v*v;
float w2 = w*w;
float usqr = u2+v2+w2;
// f3 = f1 -0.333333333f*u;
// f7 = f5 -0.166666667f*(u+v);
// f6 = f8 -0.166666667f*(u-v);
// f17= f10-0.166666667f*(u+w);
// f12= f15-0.166666667f*(u-w);
f0 = 1.0f/3.0f*(rho-1.5f*usqr);
f1 = 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
f2 = 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
f3 = 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
f4 = 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
f5 = 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
f6 = 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
f7 = 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
f8 = 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
f9 = 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
f10= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
f11= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
f12= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
f13= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
f14= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
f15= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
f16= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
f17= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
f18= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
__device__ void NeumannEast_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
// f6 = f7;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
// f7 = f6;
f13 = f11;
f18 = f16;
f8 = f5;
}
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
// f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
// f17 = f12;
f18 = f13;
}
}
float u,v,w;//,rho;
float rho = 1.0f;
v = 0.0f;
w = 0.0f;
u = -rho+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f1+f8+f5+f10+f15)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f3 = f1 -0.333333333f*u;
f7 = f5 -0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f17= f10-0.166666667f*(u+w);
f12= f15-0.166666667f*(u-w);
// f3 =(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2)+
// (f1-(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2));
// f7 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v)+
// (f5-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v));
// f6 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v)+
// (f8-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v));
// f17=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w)+
// (f10-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w));
// f12=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w)+
// (f15-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w));
// f1 =(0.1031746045f*rho+ -0.0231796391f*usqr+ (0.166666667f*u) + 0.16666667f*u2)+
// (f3-(0.1031746045f*rho+ -0.0231796391f*usqr+-(0.166666667f*u) + 0.16666667f*u2));
// f5 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+v +u2+(v2-w2))+ 0.25f*u*v)+
// (f7-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-v +u2+(v2-w2))+ 0.25f*u*v));
// f8 =(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-v +u2+(v2-w2))+ -0.25f*u*v)+
// (f6-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+v +u2+(v2-w2))+ -0.25f*u*v));
// f10=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u+w +u2+(v2-w2))+ 0.25f*u*w)+
// (f17-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u-w +u2+(v2-w2))+ 0.25f*u*w));
// f15=(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*( u-w +u2+(v2-w2))+ -0.25f*u*w)+
// (f12-(0.0158730149f*rho+ 0.00579491071f*usqr+ 0.0833333333f*(-u+w +u2+(v2-w2))+ -0.25f*u*w));
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void DirichletNorth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
// f1 =(0.166666667f*u)+
// (f3-(-(0.166666667f*u)));
f4 = f2-0.33333333f*v;
// f5 =(0.0833333333f*( u+v))+
// (f7-(0.0833333333f*(-u-v)));
f7 = f5-0.166666667f*(u+v);
// f8 =(0.0833333333f*( u-v ))+
// (f6-(0.0833333333f*(-u+v )));
f8 = f6+0.166666667f*(u-v);
// f10=(0.0833333333f*( u+w))+
// (f17-(0.0833333333f*(-u-w)));
f13= f16-0.166666667f*(v-w);
// f15=(0.0833333333f*( u-w))+
// (f12-(0.0833333333f*(-u+w)));
f18= f11-0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void DirichletSouth_Reg(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
// if(x == 0){
// f2 = f4;
// f6 = f7;
// f11 = f13;
// f16 = f18;
// }
// else if(x == XDIM-1){
// f4 = f2;
// f7 = f6;
// f13 = f11;
// f18 = f16;
// }
if(z == 0){
f9 = f14;
f10 = f15;
f11 = f16;
f12 = f17;
f13 = f18;
}
else if(z == ZDIM-1){
f14 = f9;
f15 = f10;
f16 = f11;
f17 = f12;
f18 = f13;
}
float u,v,w;//,rho;
u = UMAX;
v = 0.0f;//0.0;
w = 0.0f;
// float rho = u+(f0+f2+f4+f9+f11+f13+f14+f16+f18+2.0f*(f3+f6+f7+f12+f17)); //D2Q9i
// float u2 = u*u;
// float v2 = v*v;
// float w2 = w*w;
// float usqr = u2+v2+w2;
f2 = f4 +0.33333333f*v;
f5 = f7 +0.166666667f*(u+v);
f6 = f8 -0.166666667f*(u-v);
f16= f13+0.166666667f*(v-w);
f11= f18+0.166666667f*(v+w);
//
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr
//float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u + 0.055555556f*(2.f*u*u-(v*v+w*w));
//float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v +-0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
//float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
//float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
//float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w; +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w)+ 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w)+-0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
//float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w +-0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w)
//float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
//float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
//float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) + 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
//float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w) +-0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//
// float PI11 = f1 +f3 +f5 +f6 +f7 +f8 +f10+f12+f15+f17;
// float PI22 = f2 +f4 +f5 +f6 +f7 +f8 +f11+f13+f16+f18;
// float PI33 = f9 +f10+f11+f12+f13+f14+f15+f16+f17+f18;
}
__device__ void xsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13=f18;
f11=f18;
f16=f18;
f6 =f7;
f9 =f14;
f12=f17;
}
else if(y == 0 && z == ZDIM-1){
f4 = f2;
f11=f13;
f18=f13;
f16=f13;
f6 =f7;
f14=f9;
f17=f12;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f11=f16;
f18=f16;
f13=f16;
f7 =f6;
f9 =f14;
f12=f17;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f16=f11;
f18=f11;
f13=f11;
f7 =f6;
f14=f9;
f17=f12;
}
else{
if(y == 0){
f2 = f4;
f11=f13;
f16=f18;
f8 = f5;
}
else if(y == YDIM-1){
f4=f2 ;
f13=f11;
f18=f16;
f5=f8 ;
}
// if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
}
f1 = f3 ;
f5 = f6 ;
f8 = f7 ;
f10= f12;
f15= f17;
}
__device__ void xsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z)
{
if(y == 0 && z == 0){
f2 = f4;
f13 = f18;
f11 = f18;
f16 = f18;
f5 = f8;
f9 = f14;
f10 = f15;
}
else if(y == 0 && z == ZDIM-1){
f2 = f4;
f11 = f13;
f18 = f13;
f16 = f13;
f5 = f8;
f14 = f9;
f15 = f10;
}
else if(y == YDIM-1 && z == 0){
f4 = f2;
f18 = f16;
f11 = f16;
f13 = f16;
f8 = f5;
f9 = f14;
f10 = f15;
}
else if(y == YDIM-1 && z == ZDIM-1){
f4 = f2;
f13 = f11;
f16 = f11;
f18 = f11;
f8 = f5;
f14 = f9;
f15 = f10;
}
else{
if(y == 0){
f2 = f4;
f11 = f13;
f16 = f18;
f5 = f8;
}
else if(y == YDIM-1){
f4 = f2;
f13 = f11;
f18 = f16;
f8 = f5;
}
// else if(z == 0){
// f9 = f14;
// f10 = f15;
// f11 = f16;
// f12 = f17;
// f13 = f18;
// }
// else if(z == ZDIM-1){
// f14 = f9;
// f15 = f10;
// f16 = f11;
// f17 = f12;
// f18 = f13;
// }
}
f3 = f1 ;
f6 = f5 ;
f7 = f8 ;
f12= f10;
f17= f15;
}
__device__ void ysymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
__device__ void ysymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int z)
{
if(z == 0){
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
if(z == ZDIM-1){
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
__device__ void zsymmetry_top(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f14= f9 ;
f15= f10;
f16= f11;
f17= f12;
f18= f13;
}
__device__ void zsymmetry_bot(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y)
{
if(y == 0){
f2 = f4 ;
f6 = f7 ;
f5 = f8 ;
f11= f13;
f16= f18;
}
if(y == YDIM-1){
f4 = f2 ;
f7 = f6 ;
f8 = f5 ;
f13= f11;
f18= f16;
}
f9 = f14;
f10= f15;
f11= f16;
f12= f17;
f13= f18;
}
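//The *symmetry_* helpers above are mirror (free-slip) conditions: on each
//symmetry plane the populations whose lattice velocity points out of the
//domain are replaced by their mirror images in that plane, after the
//adjacent y- or z-edges have been folded back.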
inline __device__ void boundaries(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 53)//DirichletWest
{
//DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 54)//DirichletWest
{
//NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
// if(im == 4)//DirichletWest
// {
// NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 13)//DirichletWest
// {
// DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 14)//DirichletWest
// {
// NeumannEast_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
}
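//im codes handled by boundaries(): 53 = regularized velocity inlet (west),
//54 = regularized density outlet (east), 21/22 = y-symmetry top/bottom,
//23/24 = z-symmetry top/bottom. boundaries_force() below uses the
//non-regularized inlet/outlet variants and additionally handles 25/26
//(x-symmetry top/bottom).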
inline __device__ void boundaries_force(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, int y, int z, int im)
{
// if(im == 3)//DirichletWest
// {
// DirichletWest(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
if(im == 53)//DirichletWest
{
DirichletWest_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//DirichletWest_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 54)//DirichletWest
{
NeumannEast(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
//NeumannEast_Regularized(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
// else if(im == 15)//DirichletNorth
// {
// DirichletNorth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
// else if(im == 16)//DirichletSouth
// {
// DirichletSouth_Reg(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
// }
else if(im == 21)//ysymm top
{
ysymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 22)//ysymm bot
{
ysymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,z);
}
else if(im == 23)//zsymm top
{
zsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 24)//zsymm bot
{
zsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y);
}
else if(im == 25)//zsymm top
{
xsymmetry_top(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
else if(im == 26)//zsymm bot
{
xsymmetry_bot(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z);
}
}
inline __device__ void North_Extrap(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float rho)
{
	rho = 1.0f;//the value passed in is overridden; a unit density is imposed at the outlet
float u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
float v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
float w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
}
inline __device__ void South_Extrap(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float v)
{
	//v is the prescribed inlet velocity (the callers pass UMAX); the density is
	//extrapolated from the neighboring node and the tangential components are zeroed.
	float rho,u,w;
	rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+f10+f11+f12+f13+f14+f15+f16+f17+f18;
	u = 0.f;//f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
	w = 0.f;//f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
m1 = -30.f*f0+-11.f*f1+-11.f*f2+-11.f*f3+-11.f*f4+ 8.f*f5+ 8.f*f6+ 8.f*f7+ 8.f*f8+-11.f*f9+ 8.f*f10+ 8.f*f11+ 8.f*f12+ 8.f*f13+-11.f*f14+ 8.f*f15+ 8.f*f16+ 8.f*f17+ 8.f*f18;
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18;
m4 = -4.f*f1 + 4.f*f3 + f5+ - f6+ - f7+ f8 + f10 + - f12 + f15 + - f17 ;
m6 = -4.f*f2 + 4.f*f4+ f5+ f6+ - f7+ - f8 + f11 + - f13 + f16 + - f18;
m8 = + -4.f*f9+ f10+ f11+ f12+ f13+ 4.f*f14+ - f15+ - f16+ - f17+ - f18;
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+ - f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ - f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m10 = -4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+ -2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+ -2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+ - f9+ - f10 + - f12 + - f14+ - f15 + - f17 ;
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ - f10 + - f12 + 2.f*f14+ - f15 + - f17 ;
m13 = f5+ - f6+ f7+ - f8 ;
m14 = f11 + - f13 + - f16 + f18;
m15 = f10 + - f12 + - f15 + f17 ;
m16 = f5+ - f6+ - f7+ f8 - f10 + f12 + - f15 + f17 ;
m17 = - f5+ - f6+ f7+ f8 + f11 + - f13 + f16 + - f18;
m18 = f10+ - f11+ f12+ - f13 + - f15+ f16+ - f17+ f18;
f0 =(0.052631579f*rho +- 0.012531328f*(m1)+ 0.047619048f*(m2));
f1 =(0.052631579f*rho+ 0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)-m10));
f2 =(0.052631579f*rho + 0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f3 =(0.052631579f*rho+ -0.1f*u +-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)-m10));
f4 =(0.052631579f*rho + -0.1f*v +-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)-m10)+ 0.083333333f*((m11)-m12));
f5 =(0.052631579f*rho+ 0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f6 =(0.052631579f*rho+ -0.1f*u+ 0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f7 =(0.052631579f*rho+ -0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13))));
f8 =(0.052631579f*rho+ 0.1f*u+ -0.1f*v + 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ (0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13))));
f9 =(0.052631579f*rho + 0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f10=(0.052631579f*rho+ 0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f11=(0.052631579f*rho + 0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
f12=(0.052631579f*rho+ -0.1f*u + 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f13=(0.052631579f*rho + -0.1f*v+ 0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f14=(0.052631579f*rho + -0.1f*w+-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)-m10)+-0.083333333f*((m11)-m12));
f15=(0.052631579f*rho+ 0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15))));
f16=(0.052631579f*rho + 0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +(-0.25f*(m14))));
f17=(0.052631579f*rho+ -0.1f*u + -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ (0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15))));
f18=(0.052631579f*rho + -0.1f*v+ -0.1f*w+ 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+(-0.055555556f*(m9) +( 0.25f*(m14))));
}
//Compute the difference x - y of two timevals and store it in *result as seconds.
int
timeval_subtract (double *result, struct timeval *x, struct timeval *y)
{
struct timeval result0;
/* Perform the carry for the later subtraction by updating y. */
if (x->tv_usec < y->tv_usec) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000 + 1;
y->tv_usec -= 1000000 * nsec;
y->tv_sec += nsec;
}
if (x->tv_usec - y->tv_usec > 1000000) {
int nsec = (y->tv_usec - x->tv_usec) / 1000000;
y->tv_usec += 1000000 * nsec;
y->tv_sec -= nsec;
}
/* Compute the time remaining to wait.
tv_usec is certainly positive. */
result0.tv_sec = x->tv_sec - y->tv_sec;
result0.tv_usec = x->tv_usec - y->tv_usec;
*result = ((double)result0.tv_usec)/1e6 + (double)result0.tv_sec;
/* Return 1 if result is negative. */
return x->tv_sec < y->tv_sec;
}
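//A minimal host-side sketch of how timeval_subtract() is typically used to time a run of
//nsteps LBM steps (sys/time.h is already included at the top of the file). The helper name
//example_time_loop and the doWork callback are illustrative only and not part of the solver.
inline double example_time_loop(void (*doWork)(int), int nsteps)
{
	struct timeval start, stop;
	double elapsed = 0.0;
	gettimeofday(&start, NULL); //wall-clock time before the loop
	for(int t = 0; t < nsteps; t++)
		doWork(t); //e.g. one LBM time step
	gettimeofday(&stop, NULL); //wall-clock time after the loop
	timeval_subtract(&elapsed, &stop, &start); //elapsed = stop - start, in seconds
	return elapsed; //divide XDIM*YDIM*ZDIM*nsteps by this to estimate lattice updates per second
}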
inline __device__ void bgk_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
// f0 =(1.f-omega)*f0 +omega*(0.3333333333f*(rho-1.5f*usqr));
// f1 =(1.f-omega)*f1 +omega*(0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
// f2 =(1.f-omega)*f2 +omega*(0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
// f3 =(1.f-omega)*f3 +omega*(0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
// f4 =(1.f-omega)*f4 +omega*(0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
// f5 =(1.f-omega)*f5 +omega*(0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
// f6 =(1.f-omega)*f6 +omega*(0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
// f7 =(1.f-omega)*f7 +omega*(0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
// f8 =(1.f-omega)*f8 +omega*(0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
// f9 =(1.f-omega)*f9 +omega*(0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
// f10=(1.f-omega)*f10+omega*(0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
// f11=(1.f-omega)*f11+omega*(0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(u+w)-1.5f*usqr));
// f12=(1.f-omega)*f12+omega*(0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
// f13=(1.f-omega)*f13+omega*(0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(u+w)-1.5f*usqr));
// f14=(1.f-omega)*f14+omega*(0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
// f15=(1.f-omega)*f15+omega*(0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
// f16=(1.f-omega)*f16+omega*(0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
// f17=(1.f-omega)*f17+omega*(0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
// f18=(1.f-omega)*f18+omega*(0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
f0 -=omega*(f0 -0.3333333333f*(rho-1.5f*usqr));
f1 -=omega*(f1 -0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr));
f2 -=omega*(f2 -0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr));
f3 -=omega*(f3 -0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr));
f4 -=omega*(f4 -0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr));
f5 -=omega*(f5 -0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr));
f6 -=omega*(f6 -0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr));
f7 -=omega*(f7 -0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr));
f8 -=omega*(f8 -0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr));
f9 -=omega*(f9 -0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr));
f10-=omega*(f10-0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr));
f11-=omega*(f11-0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr));
f12-=omega*(f12-0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr));
f13-=omega*(f13-0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr));
f14-=omega*(f14-0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr));
f15-=omega*(f15-0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr));
f16-=omega*(f16-0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr));
f17-=omega*(f17-0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr));
f18-=omega*(f18-0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr));
}
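//A small sketch of how the relaxation rate passed to bgk_collide() (and mrt_collide() below)
//can be derived from the constants at the top of the file; the same expression appears later
//in mrt_scale_cf(). The helper name example_omega_from_Re is illustrative only.
inline __host__ __device__ float example_omega_from_Re(float uMax, float charLength, float Re)
{
	float nu = uMax*charLength/Re; //lattice viscosity from Re = U*L/nu
	return 1.0f/(3.0f*nu + 0.5f); //BGK/MRT shear relaxation: tau = 3*nu + 0.5, omega = 1/tau
}
//e.g. example_omega_from_Re(UMAX, 2.f*OBSTR1, RE) reproduces the omega used in mrt_scale_cf().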
inline __device__ void mrt_collide(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
float usqr = u*u+v*v+w*w;
// u = rho*u;
// v = rho*v;
// w = rho*w;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
	if(SmagLES == "YES"){ //note: compares string-literal addresses; relies on the compiler pooling identical literals
//// float PI11 = -1.0f/38.0f*( (m1)+19.0f*omega* (m9));
//// float PI22 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11)));
//// float PI33 = -1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11)));
// float PI11 = LRLEVEL*-0.026315789f*m1-0.5f *omega*m9;
// float PI22 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
// float PI33 = LRLEVEL*-0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
// float PI12 = LRLEVEL*-1.5f*omega*m13;
// float PI23 = LRLEVEL*-1.5f*omega*m14;
// float PI13 = LRLEVEL*-1.5f*omega*m15;
// float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f;
// float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
// //float Cs = 0.01f;
// omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f);
// //omega = 1.0f/(1.0f/omega+3.f*CS*Smag*LRFACTOR*LRFACTOR);
// //omega = 1.0f/(1.0f*LRLEVEL/1.99983f-1.f+0.5f+3.f*CS*Smag*LRFACTOR);
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)*LRFACTOR/3.0f;
float tau0 = 1.f/omega;
//float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR);
//float Smag = LRFACTOR*(sqrt(4.f/9.f*tau0*tau0+8.f*CS*LRFACTOR*Q)-2.f/3.f*tau0)/(4.f*CS*LRFACTOR*LRFACTOR);
//omega = 1.0f/(3.0f*(nu0+CS*Smag*LRFACTOR*LRFACTOR)*LRLEVEL+0.5f);
//float tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q));
float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*CS*sqrt(2.f)*Q));
omega = 1.f/tau;
//float tau = 3.f*nu0*LRFACTOR+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*LRFACTOR*LRFACTOR*Q)-tau0)*0.5f;
//omega = 1.f/tau;
}
f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
}
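//A minimal sketch of the Smagorinsky correction applied inside mrt_collide() when
//SmagLES == "YES": the norm Q of the non-equilibrium stress tensor is used to augment the
//base relaxation time tau0 = 1/omega. The helper name example_smagorinsky_omega is
//illustrative only; CS is the Smagorinsky constant defined at the top of the file.
inline __device__ float example_smagorinsky_omega(float omega0, float Q)
{
	float tau0 = 1.f/omega0;
	//tau = tau0 + 0.5*(-tau0 + sqrt(tau0*tau0 + 18*sqrt(2)*CS*Q)), as in mrt_collide() above
	float tau = tau0 + 0.5f*(-tau0 + sqrtf(tau0*tau0 + 18.f*CS*sqrtf(2.f)*Q));
	return 1.f/tau;
}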
inline __device__ void mrt_collide_LES(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega, float Cs)
{
float u,v,w;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
float usqr = u*u+v*v+w*w;
// u = rho*u;
// v = rho*v;
// w = rho*w;
float m1,m2,m4,m6,m8,m9,m10,m11,m12,m13,m14,m15,m16,m17,m18;
//COMPUTE M-MEQ
//m1 = -19.f*f0+ 19.f*f5+19.f*f6+19.f*f7+19.f*f8+19.f*f10+19.f*f11+19.f*f12+19.f*f13+19.f*f15+19.f*f16+19.f*f17+19.f*f18 -19.f*(u*u+v*v+w*w);//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
//m4 = -3.33333333f*f1+3.33333333f*f3+1.66666667f*f5-1.66666667f*f6-1.66666667f*f7+1.66666667f*f8+1.66666667f*f10-1.66666667f*f12+1.66666667f*f15-1.66666667f*f17;
//m6 = -3.33333333f*f2+3.33333333f*f4+1.66666667f*f5+1.66666667f*f6-1.66666667f*f7-1.66666667f*f8+1.66666667f*f11-1.66666667f*f13+1.66666667f*f16-1.66666667f*f18;
//m8 = -3.33333333f*f9+1.66666667f*f10+1.66666667f*f11+1.66666667f*f12+1.66666667f*f13+3.33333333f*f14-1.66666667f*f15-1.66666667f*f16-1.66666667f*f17-1.66666667f*f18;
m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));//+8.f*(f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18);
m2 = 12.f*f0+ -4.f*f1+ -4.f*f2+ -4.f*f3+ -4.f*f4+ f5+ f6+ f7+ f8+ -4.f*f9+ f10+ f11+ f12+ f13+ -4.f*f14+ f15+ f16+ f17+ f18 +7.53968254f*(u*u+v*v+w*w);
// m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
// m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
// m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
m13 = f5+-f6+ f7+-f8 -u*v;
m14 = f11 +- f13 + - f16 + f18 -v*w;
m15 = f10 + - f12 +-f15 + f17 -u*w;
m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
	if(SmagLES == "YES"){ //the Cs argument is only referenced in the commented-out variants below; the active branch uses the CS macro
// float PI11 = -0.026315789f*m1-0.5f *omega*m9;
// float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
// float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
//
// float PI12 = -1.5f*omega*m13;
// float PI23 = -1.5f*omega*m14;
// float PI13 = -1.5f*omega*m15;
// float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
// omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
// float PI11 = LRLEVEL*-1.0f/38.0f*( (m1)+19.0f*omega* (m9));
// float PI22 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)-3.0f*omega*(m11)));
// float PI33 = LRLEVEL*-1.0f/76.0f*(2.0f*(m1)-19.0f*(omega*(m9)+3.0f*omega*(m11)));
// float PI12 = LRLEVEL*-1.5f*omega*m13;
// float PI23 = LRLEVEL*-1.5f*omega*m14;
// float PI13 = LRLEVEL*-1.5f*omega*m15;
// float nu0 = ((1.0f/omega)-0.5f)/3.0f;
// float Smag = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+PI12*PI12+PI23*PI23+PI13*PI13);
// omega = 1.0f/(3.0f*(nu0+Cs*Smag*LRLEVEL*LRLEVEL)+0.5f);
//float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//
//float Smag = (sqrt(nu0*nu0+18.f*CS*Q)-nu0)/(6.f*CS);
//
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
//
//float tau0 = 1.f/omega;
//float tau = 3.f*nu0+0.5f+(sqrt(tau0*tau0+18.f*CS*CS*Q)-tau0)*0.5f;
//omega = 1.f/tau;
float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float tau0 = 1.f/omega;
//float Smag = (sqrt(nu0*nu0+18.f*CS*LRFACTOR*LRFACTOR*Q)-nu0)/(6.f*CS*LRFACTOR*LRFACTOR);
//float Smag = (sqrt(4.f/9.f*tau0*tau0+8.f*CS*Q)-2.f/3.f*tau0)/(4.f*CS);
//omega = 1.0f/(3.0f*(nu0+CS*Smag)+0.5f);
float tau = tau0+0.5f*(-tau0+sqrt(tau0*tau0+18.f*sqrt(2.f)*CS*Q));
omega = 1.f/tau;
}
f0 -=- 0.012531328f*(m1)+ 0.047619048f*(m2);
f1 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ -0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f2 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f3 -=-0.0045948204f*(m1)+-0.015873016f*(m2)+ 0.1f*(m4) + 0.055555556f*((m9)*omega-m10);
f4 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m6) +-0.027777778f*((m9)*omega-m10)+ 0.083333333f*((m11)*omega-m12);
f5 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f6 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16-m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f7 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*(-m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+( 0.25f*(m13)));
f8 -= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m6) +0.013888889f*(m10)+0.041666667f*(m12)+0.125f*( m16+m17)+ omega*(0.027777778f*(m9) +0.083333333f*(m11)+(-0.25f*(m13)));
f9 -=-0.0045948204f*(m1)+-0.015873016f*(m2) + -0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f10-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f11-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6+m8)+0.125f*( m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
f12-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16+m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f13-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6-m8)+0.125f*(-m17-m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f14-=-0.0045948204f*(m1)+-0.015873016f*(m2) + 0.1f*(m8)+-0.027777778f*((m9)*omega-m10)+-0.083333333f*((m11)*omega-m12);
f15-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+ 0.025f*(m4-m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*(-m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+(-0.25f*(m15)));
f16-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + 0.025f*(m6-m8)+0.125f*( m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +(-0.25f*(m14)));
f17-= 0.0033416876f*(m1)+ 0.003968254f*(m2)+-0.025f*(m4+m8) +0.013888889f*(m10)-0.041666667f*(m12)+0.125f*( m16-m18)+ omega*(0.027777778f*(m9) -0.083333333f*(m11)+( 0.25f*(m15)));
f18-= 0.0033416876f*(m1)+ 0.003968254f*(m2) + -0.025f*(m6+m8)+0.125f*(-m17+m18)-0.027777778f*(m10)+omega*(-0.055555556f*(m9) +( 0.25f*(m14)));
}
inline __device__ void bgk_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
	//rescale the non-equilibrium part of each distribution, f = SF*f + (1-SF)*feq,
	//when transferring distributions between grids of different resolution.
	float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.3333333333f*(rho-1.5f*usqr);
float feq1 = 0.0555555556f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
float feq2 = 0.0555555556f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
float feq3 = 0.0555555556f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
float feq4 = 0.0555555556f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
float feq5 = 0.0277777778f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
float feq6 = 0.0277777778f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
float feq7 = 0.0277777778f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
float feq8 = 0.0277777778f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
float feq9 = 0.0555555556f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
float feq10 = 0.0277777778f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
float feq11 = 0.0277777778f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
float feq12 = 0.0277777778f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
float feq13 = 0.0277777778f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
float feq14 = 0.0555555556f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
float feq15 = 0.0277777778f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
float feq16 = 0.0277777778f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
float feq17 = 0.0277777778f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
float feq18 = 0.0277777778f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
inline __device__ void mrt_scale_cf(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float SF)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18) -19.f*(u*u+v*v+w*w);
//float m2 = 12.f*f0+-4.f*f1+-4.f*f2+-4.f*f3+-4.f*f4+f5+f6+f7+f8+-4.f*f9+f10+f11+f12+f13+-4.f*f14+f15+f16+f17+f18 +7.53968254f*(u*u+v*v+w*w);
//float m4 = 1.666666667f*(-2.f*f1+2.f*f3+f5-f6-f7+f8+f10-f12+f15-f17);
//float m6 = 1.666666667f*(-2.f*f2+2.f*f4+f5+f6-f7-f8+f11-f13+f16-f18);
//float m8 = 1.666666667f*(-2.f*f9+f10+f11+f12+f13+2.f*f14-f15-f16-f17-f18);
//float m4 = 1.666666667f*(-3.f*f1+3.f*f3+u);
//float m6 = 1.666666667f*(-3.f*f2+3.f*f4+v);
//float m8 = 1.666666667f*(-3.f*f9+3.f*f14+w);
//float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
//float m10 =-4.f*f1+ 2.f*f2+ -4.f*f3+ 2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+ f10+ -2.f*f11+ f12+-2.f*f13+ 2.f*f14+ f15+ -2.f*f16+ f17+-2.f*f18;
//float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
//float m12 = -2.f*f2 -2.f*f4+ f5+ f6+ f7+ f8+ 2.f*f9+-f10 +-f12 + 2.f*f14+-f15 +-f17 ;
//float m13 = f5+-f6+ f7+-f8 -u*v;
//float m14 = f11 +- f13 + - f16 + f18 -v*w;
//float m15 = f10 + - f12 +-f15 + f17 -u*w;
//float m16 = f5+-f6+-f7+ f8 -f10 + f12 +-f15 + f17 ;
//float m17 = -f5+-f6+ f7+ f8 + f11 +- f13 + f16 +- f18;
//float m18 = f10+- f11+ f12+- f13 +-f15+ f16+-f17+ f18;
float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
float m13 = f5+-f6+ f7+-f8 -u*v;
float m14 = f11 +- f13 + - f16 + f18 -v*w;
float m15 = f10 + - f12 +-f15 + f17 -u*w;
	float omega = 1.0f/(3.0f*(UMAX*OBSTR1*2.f/RE)+0.5f);//coarse-mesh omega from nu = UMAX*(2*OBSTR1)/RE
	float omega2 = 2.0f/(1.0f+2.0f*(2.0f/omega-1.0f));//corresponding fine-mesh omega (refinement factor 2)
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//we know Smag on coarse mesh
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//omega = 1.0f/(3.0f*(nu0+Cs*Smag*sqrt(2.f))+0.5f);
//omega = 1.0f/(1.0f/omega+3.f*CS*Smag);
//omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR);
//omega = 1.0f/(1.0f/omega +3.f*CS*Smag);
//omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*sqrt(2.f)*LRFACTOR*LRFACTOR);
//omega = 1.0f/(1.0f/omega +3.f*CS*Smag);
//omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*sqrt(2.f)*LRFACTOR);
//float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
//float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
//float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
//float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//float tau0c = 1.f/omega;
//float tau = tau0c+0.5*(-tau0c+sqrt(tau0c*tau0c+18.f*CS*Q));//tau_total of coarse mesh
//omega = 1.f/tau;//total omega on coarse mesh
//tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*LRFACTOR*Q));
//omega2= 1.f/tau;
SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2);//for post-collision
//SF = omega*0.5f/omega2;//for post-streaming, pre-collision?
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
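//A compact sketch of the scale factor used above (and passed to bgk_scale_cf()) when
//distributions move between the coarse and fine grids; omegaC and omegaF are the relaxation
//rates of the coarse and fine meshes. The helper name example_scale_factor_cf is illustrative only.
inline __host__ __device__ float example_scale_factor_cf(float omegaC, float omegaF)
{
	//post-collision rescaling of the non-equilibrium part, as in mrt_scale_cf()
	return (omegaC*(1.0f-omegaF))/((1.0f-omegaC)*omegaF);
}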
inline __device__ void mrt_scale_fc_LES(float& f0, float& f1, float& f2,
float& f3 , float& f4 , float& f5 ,
float& f6 , float& f7 , float& f8 , float& f9,
float& f10, float& f11, float& f12,
float& f13, float& f14, float& f15,
float& f16, float& f17, float& f18, float omega, float omega2)
{
float rho,u,v,w;
rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9+
f10+f11+f12+f13+f14+f15+f16+f17+f18;
u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
float usqr = u*u+v*v+w*w;
float feq0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float feq1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float feq2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float feq3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float feq4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float feq5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float feq6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float feq7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float feq8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float feq9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float feq10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float feq11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float feq12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float feq13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float feq14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float feq15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float feq16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float feq17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float feq18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
feq1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
feq4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
feq5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
feq8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
feq9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
feq12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
feq14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
feq15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
feq16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
feq17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
feq18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
//float m1 = 19.f*(-f0+ f5+f6+f7+f8+f10+f11+f12+f13+f15+f16+f17+f18 -(u*u+v*v+w*w));
//float m9 = 2.f*f1+ - f2+ 2.f*f3+ - f4+ f5+ f6+ f7+ f8+- f9+ f10+ -2.f*f11+ f12+-2.f*f13+- f14+ f15+ -2.f*f16+ f17+-2.f*f18 -(2.f*u*u-(v*v+w*w));
//float m11 = f2 + f4+ f5+ f6+ f7+ f8+- f9+-f10 +-f12 +- f14+-f15 +-f17 -(v*v-w*w);
//float m13 = f5+-f6+ f7+-f8 -u*v;
//float m14 = f11 +- f13 + - f16 + f18 -v*w;
//float m15 = f10 + - f12 +-f15 + f17 -u*w;
//float PI11 = -0.026315789f*m1-0.5f *omega*m9;
//float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
//float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
//float PI12 = -1.5f*omega*m13;
//float PI23 = -1.5f*omega*m14;
//float PI13 = -1.5f*omega*m15;
////we know Smag on fine mesh. Smag_c=Smag_f*sqrt(2)
//float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
////omega = 1.0f/(3.0f*(nu0+CS*Smag*sqrt(2.f))+0.5f);
////omega2 = 1.0f/(1.0f/omega2+3.f*CS*Smag*LRFACTOR);
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f));
////omega2 = 1.0f/(1.0f*LRLEVEL/omega2-1.f+0.5f+3.f*CS*Smag*LRFACTOR);
////omega = 1.0f/(1.0f/omega+3.f*CS*Smag/sqrt(2.f));
//float PI11 = (f1-feq1)+(f3-feq3)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17);
//float PI22 = (f2-feq2)+(f4-feq4)+(f5-feq5)+(f6-feq6)+(f7-feq7)+(f8-feq8)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI33 = (f9-feq9)+(f14-feq14)+(f10-feq10)+(f12-feq12)+(f15-feq15)+(f17-feq17)+(f11-feq11)+(f13-feq13)+(f16-feq16)+(f18-feq18);
//float PI12 = (f5-feq5)+(f7-feq7)-(f6-feq6)-(f8-feq8);
//float PI13 = (f10-feq10)+(f17-feq17)-(f12-feq12)-(f15-feq15);
//float PI23 = (f11-feq11)+(f18-feq18)-(f13-feq13)-(f16-feq16);
//
//float Q = sqrt(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13);
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
//float tau0f = 1.f/omega2;
//float tau0c = 1.f/omega;
//float tau = tau0f+0.5*(-tau0f+sqrt(tau0f*tau0f+18.f*CS*sqrt(2.f)*Q));//tau_total of fine
//omega2 = 1.f/tau;//total omega on fine mesh
//tau = LRLEVEL*(tau-tau0f)+tau0c;
//omega= 1.f/tau;
//tau = tau0+0.5*(-tau0+sqrt(tau0*tau0+18.f*CS*Q));
float SF = (omega*(1.0f-omega2))/((1.0f-omega)*omega2);
//float SF = omega2*2.f/omega;
//float SF = ((1.0f-omega)*omega2/LRFACTOR)/(omega*(1.0f-omega2));
//SF = omega*2.f/omega2;
f0 =SF*f0 +(1.0f-SF)*feq0 ;
f1 =SF*f1 +(1.0f-SF)*feq1 ;
f2 =SF*f2 +(1.0f-SF)*feq2 ;
f3 =SF*f3 +(1.0f-SF)*feq3 ;
f4 =SF*f4 +(1.0f-SF)*feq4 ;
f5 =SF*f5 +(1.0f-SF)*feq5 ;
f6 =SF*f6 +(1.0f-SF)*feq6 ;
f7 =SF*f7 +(1.0f-SF)*feq7 ;
f8 =SF*f8 +(1.0f-SF)*feq8 ;
f9 =SF*f9 +(1.0f-SF)*feq9 ;
f10=SF*f10+(1.0f-SF)*feq10;
f11=SF*f11+(1.0f-SF)*feq11;
f12=SF*f12+(1.0f-SF)*feq12;
f13=SF*f13+(1.0f-SF)*feq13;
f14=SF*f14+(1.0f-SF)*feq14;
f15=SF*f15+(1.0f-SF)*feq15;
f16=SF*f16+(1.0f-SF)*feq16;
f17=SF*f17+(1.0f-SF)*feq17;
f18=SF*f18+(1.0f-SF)*feq18;
}
__device__ int dmin(int a, int b)//clamp a to at most b-1
{
    if (a<b) return a;
    else return b-1;
}
__device__ int dmax(int a)//clamp a to at least 0
{
    if (a>-1) return a;
    else return 0;
}
__device__ int dmin_p(int a, int b)//periodic wrap: a >= b maps to 0
{
    if (a<b) return a;
    else return 0;
}
__device__ int dmax_p(int a, int b)//periodic wrap: a < 0 maps to b-1
{
    if (a>-1) return a;
    else return b-1;
}
//Linear index of distribution f_num at node (x,y,z) in the interior array (SoA layout,
//one pitch*YDIM*zInner slab per distribution); the index is clamped to the valid range.
inline __device__ int f_mem(int f_num, int x, int y, int z, size_t pitch, int zInner)
{
	int index = (x+y*pitch+z*YDIM*pitch)+f_num*pitch*YDIM*(zInner);
	index = dmax(index);
	index = dmin(index,19*pitch*YDIM*(zInner));
// if(index<0) index = 0;
// else if(index>19*pitch*YDIM*ZDIM/GPU_N-2) index = 19*pitch*(YDIM*ZDIM/GPU_N-2);
return index;
}
//Linear index of distribution f_num at node (x,y) in a single-plane buffer (g, h, or temp).
inline __device__ int buff_mem(int f_num, int x, int y, size_t pitch)
{
	int index = (x+y*pitch)+f_num*pitch*YDIM;
	index = dmax(index);
	index = dmin(index,19*pitch*YDIM);
// if(index<0) index = 0;
// else if(index>19*pitch*YDIM) index = 19*pitch*YDIM;
return index;
}
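//A host-side sketch of the allocation sizes implied by f_mem() and buff_mem(): each GPU holds
//an interior array of 19*pitch*YDIM*zInner distributions plus one 19*pitch*YDIM buffer per
//boundary plane (g, h, temp). pitch_elem is the pitch in elements, as the kernels expect;
//the struct and function names are illustrative only and not part of the original solver.
struct example_lbm_sizes { size_t interior_bytes; size_t buffer_bytes; };
inline example_lbm_sizes example_alloc_sizes(size_t pitch_elem, int zInner)
{
	example_lbm_sizes s;
	s.interior_bytes = 19*pitch_elem*YDIM*(size_t)zInner*sizeof(float); //indexed by f_mem()
	s.buffer_bytes = 19*pitch_elem*YDIM*sizeof(float); //indexed by buff_mem()
	return s;
}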
__global__ void update_inner(float* fA, float* fB, float* g, float* h,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
	int im = ImageFcn(x,y,GPU*(zInner+2)+1+z);//global z index: +1 skips this GPU's bottom buffer plane
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
// if(REFINEMENT == "YES" && x > LRX0+1 && x < LRX0+(XLRDIM-1)*LRFACTOR-1
// && y > LRY0+1 && y < LRY0+(YLRDIM-1)*LRFACTOR-1 && z > LRZ0+1 && z < LRZ0+(ZLRDIM-1)*LRFACTOR-1 ||
// (x>XDIM-1)){
// }
// else{
f0 = fA[j];
f1 = fA[f_mem (1 ,x-1,y ,z ,pitch, zInner)];
f3 = fA[f_mem (3 ,x+1,y ,z ,pitch, zInner)];
f2 = fA[f_mem (2 ,x ,y-1,z ,pitch, zInner)];
f5 = fA[f_mem (5 ,x-1,y-1,z ,pitch, zInner)];
f6 = fA[f_mem (6 ,x+1,y-1,z ,pitch, zInner)];
f4 = fA[f_mem (4 ,x ,y+1,z ,pitch, zInner)];
f7 = fA[f_mem (7 ,x+1,y+1,z ,pitch, zInner)];
f8 = fA[f_mem (8 ,x-1,y+1,z ,pitch, zInner)];
if(z==zInner){//top nodes need info from h
f9 = fA[f_mem (9 ,x ,y ,z-1,pitch, zInner)];
f10= fA[f_mem (10,x-1,y ,z-1,pitch, zInner)];
f11= fA[f_mem (11,x ,y-1,z-1,pitch, zInner)];
f12= fA[f_mem (12,x+1,y ,z-1,pitch, zInner)];
f13= fA[f_mem (13,x ,y+1,z-1,pitch, zInner)];
f14= h [buff_mem(14,x ,y ,pitch)];
f15= h [buff_mem(15,x-1,y ,pitch)];
f16= h [buff_mem(16,x ,y-1,pitch)];
f17= h [buff_mem(17,x+1,y ,pitch)];
f18= h [buff_mem(18,x ,y+1,pitch)];
}
else if(z==0){//bottom nodes need info from g
f9 = g [buff_mem(9 ,x ,y ,pitch)];
f10= g [buff_mem(10,x-1,y ,pitch)];
f11= g [buff_mem(11,x ,y-1,pitch)];
f12= g [buff_mem(12,x+1,y ,pitch)];
f13= g [buff_mem(13,x ,y+1,pitch)];
f14= fA[f_mem (14,x ,y ,z+1,pitch, zInner)];
f15= fA[f_mem (15,x-1,y ,z+1,pitch, zInner)];
f16= fA[f_mem (16,x ,y-1,z+1,pitch, zInner)];
f17= fA[f_mem (17,x+1,y ,z+1,pitch, zInner)];
f18= fA[f_mem (18,x ,y+1,z+1,pitch, zInner)];
}
else{//normal nodes
f9 = fA[f_mem(9 ,x ,y ,z,pitch,zInner)];
f10= fA[f_mem(10,x-1,y ,z,pitch,zInner)];
f11= fA[f_mem(11,x ,y-1,z,pitch,zInner)];
f12= fA[f_mem(12,x+1,y ,z,pitch,zInner)];
f13= fA[f_mem(13,x ,y+1,z,pitch,zInner)];
f14= fA[f_mem(14,x ,y ,z,pitch,zInner)];
f15= fA[f_mem(15,x-1,y ,z,pitch,zInner)];
f16= fA[f_mem(16,x ,y-1,z,pitch,zInner)];
f17= fA[f_mem(17,x+1,y ,z,pitch,zInner)];
f18= fA[f_mem(18,x ,y+1,z,pitch,zInner)];
}//end normal nodes
if(im == 1 || im ==10){//BB
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f14;
fB[f_mem(10,x,y,z,pitch,zInner)] = f17;
fB[f_mem(11,x,y,z,pitch,zInner)] = f18;
fB[f_mem(12,x,y,z,pitch,zInner)] = f15;
fB[f_mem(13,x,y,z,pitch,zInner)] = f16;
fB[f_mem(14,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(15,x,y,z,pitch,zInner)] = f12;
fB[f_mem(16,x,y,z,pitch,zInner)] = f13;
fB[f_mem(17,x,y,z,pitch,zInner)] = f10;
fB[f_mem(18,x,y,z,pitch,zInner)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = fA[f_mem(0 ,x,y-1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y-1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y-1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y-1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y-1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y-1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y-1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y-1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y-1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y-1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y-1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y-1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y-1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y-1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y-1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y-1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y-1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y-1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y-1,z,pitch,zInner)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = fA[f_mem(0 ,x,y+1,z,pitch,zInner)];
f1 = fA[f_mem(1 ,x,y+1,z,pitch,zInner)];
f3 = fA[f_mem(3 ,x,y+1,z,pitch,zInner)];
f2 = fA[f_mem(2 ,x,y+1,z,pitch,zInner)];
f5 = fA[f_mem(5 ,x,y+1,z,pitch,zInner)];
f6 = fA[f_mem(6 ,x,y+1,z,pitch,zInner)];
f4 = fA[f_mem(4 ,x,y+1,z,pitch,zInner)];
f7 = fA[f_mem(7 ,x,y+1,z,pitch,zInner)];
f8 = fA[f_mem(8 ,x,y+1,z,pitch,zInner)];
f9 = fA[f_mem(9 ,x,y+1,z,pitch,zInner)];
f10= fA[f_mem(10,x,y+1,z,pitch,zInner)];
f11= fA[f_mem(11,x,y+1,z,pitch,zInner)];
f12= fA[f_mem(12,x,y+1,z,pitch,zInner)];
f13= fA[f_mem(13,x,y+1,z,pitch,zInner)];
f14= fA[f_mem(14,x,y+1,z,pitch,zInner)];
f15= fA[f_mem(15,x,y+1,z,pitch,zInner)];
f16= fA[f_mem(16,x,y+1,z,pitch,zInner)];
f17= fA[f_mem(17,x,y+1,z,pitch,zInner)];
f18= fA[f_mem(18,x,y+1,z,pitch,zInner)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
fB[f_mem(0 ,x,y,z,pitch,zInner)] = f0 ;
fB[f_mem(1 ,x,y,z,pitch,zInner)] = f1 ;
fB[f_mem(2 ,x,y,z,pitch,zInner)] = f2 ;
fB[f_mem(3 ,x,y,z,pitch,zInner)] = f3 ;
fB[f_mem(4 ,x,y,z,pitch,zInner)] = f4 ;
fB[f_mem(5 ,x,y,z,pitch,zInner)] = f5 ;
fB[f_mem(6 ,x,y,z,pitch,zInner)] = f6 ;
fB[f_mem(7 ,x,y,z,pitch,zInner)] = f7 ;
fB[f_mem(8 ,x,y,z,pitch,zInner)] = f8 ;
fB[f_mem(9 ,x,y,z,pitch,zInner)] = f9 ;
fB[f_mem(10,x,y,z,pitch,zInner)] = f10;
fB[f_mem(11,x,y,z,pitch,zInner)] = f11;
fB[f_mem(12,x,y,z,pitch,zInner)] = f12;
fB[f_mem(13,x,y,z,pitch,zInner)] = f13;
fB[f_mem(14,x,y,z,pitch,zInner)] = f14;
fB[f_mem(15,x,y,z,pitch,zInner)] = f15;
fB[f_mem(16,x,y,z,pitch,zInner)] = f16;
fB[f_mem(17,x,y,z,pitch,zInner)] = f17;
fB[f_mem(18,x,y,z,pitch,zInner)] = f18;
}
// }
}
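//A hedged sketch of how update_inner might be launched for one GPU, assuming one thread per
//node of the zInner interior planes and the block sizes defined at the top of the file. The
//actual launch configuration lives in host code outside this excerpt, so the grid dimensions
//below (and the assumption of exact divisibility) are illustrative only.
void example_launch_update_inner(float* fA, float* fB, float* g, float* h,
                                 float omega, size_t pitch_elem, int GPU, int zInner)
{
	dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
	dim3 grid(XDIM/BLOCKSIZEX, YDIM/BLOCKSIZEY, zInner/BLOCKSIZEZ);
	hipLaunchKernelGGL(update_inner, grid, threads, 0, 0,
	                   fA, fB, g, h, omega, pitch_elem, GPU, zInner);
}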
__global__ void update_bottom(float* gA, float* gB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (zInner+2);
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,GPU*z);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = gA [j];
f1 = gA [buff_mem(1 ,x-1,y ,pitch)];
f3 = gA [buff_mem(3 ,x+1,y ,pitch)];
f2 = gA [buff_mem(2 ,x ,y-1,pitch)];
f5 = gA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = gA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = gA [buff_mem(4 ,x ,y+1,pitch)];
f7 = gA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = gA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = temp[buff_mem(9 ,x ,y ,pitch)];
f10= temp[buff_mem(10,x-1,y ,pitch)];
f11= temp[buff_mem(11,x ,y-1,pitch)];
f12= temp[buff_mem(12,x+1,y ,pitch)];
f13= temp[buff_mem(13,x ,y+1,pitch)];
f14= f [f_mem (14,x ,y ,0,pitch, zInner)];
f15= f [f_mem (15,x-1,y ,0,pitch, zInner)];
f16= f [f_mem (16,x ,y-1,0,pitch, zInner)];
f17= f [f_mem (17,x+1,y ,0,pitch, zInner)];
f18= f [f_mem (18,x ,y+1,0,pitch, zInner)];
if(im == 1 || im ==10){//BB
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f3 ;
gB[buff_mem(2 ,x,y,pitch)] = f4 ;
gB[buff_mem(3 ,x,y,pitch)] = f1 ;
gB[buff_mem(4 ,x,y,pitch)] = f2 ;
gB[buff_mem(5 ,x,y,pitch)] = f7 ;
gB[buff_mem(6 ,x,y,pitch)] = f8 ;
gB[buff_mem(7 ,x,y,pitch)] = f5 ;
gB[buff_mem(8 ,x,y,pitch)] = f6 ;
gB[buff_mem(9 ,x,y,pitch)] = f14;
gB[buff_mem(10,x,y,pitch)] = f17;
gB[buff_mem(11,x,y,pitch)] = f18;
gB[buff_mem(12,x,y,pitch)] = f15;
gB[buff_mem(13,x,y,pitch)] = f16;
gB[buff_mem(14,x,y,pitch)] = f9 ;
gB[buff_mem(15,x,y,pitch)] = f12;
gB[buff_mem(16,x,y,pitch)] = f13;
gB[buff_mem(17,x,y,pitch)] = f10;
gB[buff_mem(18,x,y,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = gA[buff_mem(0 ,x,y-1,pitch)];
f1 = gA[buff_mem(1 ,x,y-1,pitch)];
f3 = gA[buff_mem(3 ,x,y-1,pitch)];
f2 = gA[buff_mem(2 ,x,y-1,pitch)];
f5 = gA[buff_mem(5 ,x,y-1,pitch)];
f6 = gA[buff_mem(6 ,x,y-1,pitch)];
f4 = gA[buff_mem(4 ,x,y-1,pitch)];
f7 = gA[buff_mem(7 ,x,y-1,pitch)];
f8 = gA[buff_mem(8 ,x,y-1,pitch)];
f9 = gA[buff_mem(9 ,x,y-1,pitch)];
f10= gA[buff_mem(10,x,y-1,pitch)];
f11= gA[buff_mem(11,x,y-1,pitch)];
f12= gA[buff_mem(12,x,y-1,pitch)];
f13= gA[buff_mem(13,x,y-1,pitch)];
f14= gA[buff_mem(14,x,y-1,pitch)];
f15= gA[buff_mem(15,x,y-1,pitch)];
f16= gA[buff_mem(16,x,y-1,pitch)];
f17= gA[buff_mem(17,x,y-1,pitch)];
f18= gA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = gA[buff_mem(0 ,x,y+1,pitch)];
f1 = gA[buff_mem(1 ,x,y+1,pitch)];
f3 = gA[buff_mem(3 ,x,y+1,pitch)];
f2 = gA[buff_mem(2 ,x,y+1,pitch)];
f5 = gA[buff_mem(5 ,x,y+1,pitch)];
f6 = gA[buff_mem(6 ,x,y+1,pitch)];
f4 = gA[buff_mem(4 ,x,y+1,pitch)];
f7 = gA[buff_mem(7 ,x,y+1,pitch)];
f8 = gA[buff_mem(8 ,x,y+1,pitch)];
f9 = gA[buff_mem(9 ,x,y+1,pitch)];
f10= gA[buff_mem(10,x,y+1,pitch)];
f11= gA[buff_mem(11,x,y+1,pitch)];
f12= gA[buff_mem(12,x,y+1,pitch)];
f13= gA[buff_mem(13,x,y+1,pitch)];
f14= gA[buff_mem(14,x,y+1,pitch)];
f15= gA[buff_mem(15,x,y+1,pitch)];
f16= gA[buff_mem(16,x,y+1,pitch)];
f17= gA[buff_mem(17,x,y+1,pitch)];
f18= gA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
gB[buff_mem(0 ,x,y,pitch)] = f0 ;
gB[buff_mem(1 ,x,y,pitch)] = f1 ;
gB[buff_mem(2 ,x,y,pitch)] = f2 ;
gB[buff_mem(3 ,x,y,pitch)] = f3 ;
gB[buff_mem(4 ,x,y,pitch)] = f4 ;
gB[buff_mem(5 ,x,y,pitch)] = f5 ;
gB[buff_mem(6 ,x,y,pitch)] = f6 ;
gB[buff_mem(7 ,x,y,pitch)] = f7 ;
gB[buff_mem(8 ,x,y,pitch)] = f8 ;
gB[buff_mem(9 ,x,y,pitch)] = f9 ;
gB[buff_mem(10,x,y,pitch)] = f10;
gB[buff_mem(11,x,y,pitch)] = f11;
gB[buff_mem(12,x,y,pitch)] = f12;
gB[buff_mem(13,x,y,pitch)] = f13;
gB[buff_mem(14,x,y,pitch)] = f14;
gB[buff_mem(15,x,y,pitch)] = f15;
gB[buff_mem(16,x,y,pitch)] = f16;
gB[buff_mem(17,x,y,pitch)] = f17;
gB[buff_mem(18,x,y,pitch)] = f18;
}
// }
}
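//update_top advances the single top halo slice of this GPU's slab (stored in the h buffers).
//In-plane populations stream from hA, the upward-moving populations f9-f13 stream from the last
//inner slice of f (z=zInner-1), and the downward-moving populations f14-f18 stream from temp,
//which holds the layer peer-copied from the next GPU's bottom buffer (see main()).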
__global__ void update_top(float* hA, float* hB, float* f, float* temp,
float omega, size_t pitch, int GPU, int zInner)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = (GPU+1)*(zInner+2)-1;//physical coord
int j = x+y*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,(GPU+1)*(zInner+2)-1);
float f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18;
f0 = hA[j];
f1 = hA [buff_mem(1 ,x-1,y ,pitch)];
f3 = hA [buff_mem(3 ,x+1,y ,pitch)];
f2 = hA [buff_mem(2 ,x ,y-1,pitch)];
f5 = hA [buff_mem(5 ,x-1,y-1,pitch)];
f6 = hA [buff_mem(6 ,x+1,y-1,pitch)];
f4 = hA [buff_mem(4 ,x ,y+1,pitch)];
f7 = hA [buff_mem(7 ,x+1,y+1,pitch)];
f8 = hA [buff_mem(8 ,x-1,y+1,pitch)];
f9 = f [f_mem (9 ,x ,y ,zInner-1,pitch, zInner)];
f10= f [f_mem (10,x-1,y ,zInner-1,pitch, zInner)];
f11= f [f_mem (11,x ,y-1,zInner-1,pitch, zInner)];
f12= f [f_mem (12,x+1,y ,zInner-1,pitch, zInner)];
f13= f [f_mem (13,x ,y+1,zInner-1,pitch, zInner)];
f14= temp[buff_mem(14,x ,y ,pitch)];
f15= temp[buff_mem(15,x-1,y ,pitch)];
f16= temp[buff_mem(16,x ,y-1,pitch)];
f17= temp[buff_mem(17,x+1,y ,pitch)];
f18= temp[buff_mem(18,x ,y+1,pitch)];
if(im == 1 || im ==10){//BB
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f3 ;
hB[buff_mem(2 ,x,y,pitch)] = f4 ;
hB[buff_mem(3 ,x,y,pitch)] = f1 ;
hB[buff_mem(4 ,x,y,pitch)] = f2 ;
hB[buff_mem(5 ,x,y,pitch)] = f7 ;
hB[buff_mem(6 ,x,y,pitch)] = f8 ;
hB[buff_mem(7 ,x,y,pitch)] = f5 ;
hB[buff_mem(8 ,x,y,pitch)] = f6 ;
hB[buff_mem(9 ,x,y,pitch)] = f14;
hB[buff_mem(10,x,y,pitch)] = f17;
hB[buff_mem(11,x,y,pitch)] = f18;
hB[buff_mem(12,x,y,pitch)] = f15;
hB[buff_mem(13,x,y,pitch)] = f16;
hB[buff_mem(14,x,y,pitch)] = f9 ;
hB[buff_mem(15,x,y,pitch)] = f12;
hB[buff_mem(16,x,y,pitch)] = f13;
hB[buff_mem(17,x,y,pitch)] = f10;
hB[buff_mem(18,x,y,pitch)] = f11;
}
else{
if(im == 100)//north outlet
{
f0 = hA[buff_mem(0 ,x,y-1,pitch)];
f1 = hA[buff_mem(1 ,x,y-1,pitch)];
f3 = hA[buff_mem(3 ,x,y-1,pitch)];
f2 = hA[buff_mem(2 ,x,y-1,pitch)];
f5 = hA[buff_mem(5 ,x,y-1,pitch)];
f6 = hA[buff_mem(6 ,x,y-1,pitch)];
f4 = hA[buff_mem(4 ,x,y-1,pitch)];
f7 = hA[buff_mem(7 ,x,y-1,pitch)];
f8 = hA[buff_mem(8 ,x,y-1,pitch)];
f9 = hA[buff_mem(9 ,x,y-1,pitch)];
f10= hA[buff_mem(10,x,y-1,pitch)];
f11= hA[buff_mem(11,x,y-1,pitch)];
f12= hA[buff_mem(12,x,y-1,pitch)];
f13= hA[buff_mem(13,x,y-1,pitch)];
f14= hA[buff_mem(14,x,y-1,pitch)];
f15= hA[buff_mem(15,x,y-1,pitch)];
f16= hA[buff_mem(16,x,y-1,pitch)];
f17= hA[buff_mem(17,x,y-1,pitch)];
f18= hA[buff_mem(18,x,y-1,pitch)];
North_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,1.0f);
}
if(im == 200)//south inlet
{
f0 = hA[buff_mem(0 ,x,y+1,pitch)];
f1 = hA[buff_mem(1 ,x,y+1,pitch)];
f3 = hA[buff_mem(3 ,x,y+1,pitch)];
f2 = hA[buff_mem(2 ,x,y+1,pitch)];
f5 = hA[buff_mem(5 ,x,y+1,pitch)];
f6 = hA[buff_mem(6 ,x,y+1,pitch)];
f4 = hA[buff_mem(4 ,x,y+1,pitch)];
f7 = hA[buff_mem(7 ,x,y+1,pitch)];
f8 = hA[buff_mem(8 ,x,y+1,pitch)];
f9 = hA[buff_mem(9 ,x,y+1,pitch)];
f10= hA[buff_mem(10,x,y+1,pitch)];
f11= hA[buff_mem(11,x,y+1,pitch)];
f12= hA[buff_mem(12,x,y+1,pitch)];
f13= hA[buff_mem(13,x,y+1,pitch)];
f14= hA[buff_mem(14,x,y+1,pitch)];
f15= hA[buff_mem(15,x,y+1,pitch)];
f16= hA[buff_mem(16,x,y+1,pitch)];
f17= hA[buff_mem(17,x,y+1,pitch)];
f18= hA[buff_mem(18,x,y+1,pitch)];
South_Extrap(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,UMAX);
}
boundaries(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,y,z,im);
if(MODEL == "MRT")
mrt_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
else if(MODEL == "BGK")
bgk_collide(f0,f1,f2,f3,f4,f5,f6,f7,f8,f9,f10,f11,f12,f13,f14,f15,f16,f17,f18,omega);
hB[buff_mem(0 ,x,y,pitch)] = f0 ;
hB[buff_mem(1 ,x,y,pitch)] = f1 ;
hB[buff_mem(2 ,x,y,pitch)] = f2 ;
hB[buff_mem(3 ,x,y,pitch)] = f3 ;
hB[buff_mem(4 ,x,y,pitch)] = f4 ;
hB[buff_mem(5 ,x,y,pitch)] = f5 ;
hB[buff_mem(6 ,x,y,pitch)] = f6 ;
hB[buff_mem(7 ,x,y,pitch)] = f7 ;
hB[buff_mem(8 ,x,y,pitch)] = f8 ;
hB[buff_mem(9 ,x,y,pitch)] = f9 ;
hB[buff_mem(10,x,y,pitch)] = f10;
hB[buff_mem(11,x,y,pitch)] = f11;
hB[buff_mem(12,x,y,pitch)] = f12;
hB[buff_mem(13,x,y,pitch)] = f13;
hB[buff_mem(14,x,y,pitch)] = f14;
hB[buff_mem(15,x,y,pitch)] = f15;
hB[buff_mem(16,x,y,pitch)] = f16;
hB[buff_mem(17,x,y,pitch)] = f17;
hB[buff_mem(18,x,y,pitch)] = f18;
}
// }
}
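//inline-PTX global load with the .cg qualifier (cache in L2 only, bypass L1), for streaming reads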
__device__ __inline__ float ld_gb1_cg(const float *addr)
{
float return_value;
asm("ld.global.cg.f32 %0, [%1];" : "=f"(return_value) : "l"(addr));
return return_value;
}
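//initialize_single fills the interior (non-halo) slices with a uniform initial state
//(rho=1, u=0.05, v=UMAX, w=0): the first branch writes the standard D3Q19 second-order
//equilibrium used by the BGK model, the second branch initializes from moments for the MRT model.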
__global__ void initialize_single(float *f, size_t pitch, int GPU_N)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int z = threadIdx.z+blockIdx.z*blockDim.z;
int j = x+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
int im = ImageFcn(x,y,z);
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.05f;
v = UMAX;
w = 0.0f;
// if(im == 10 || im == 1){
// u = 0.0f;
// v = 0.0f;
// w = 0.0f;
// }
//if(x == 3 ) u = 0.1f;
usqr = u*u+v*v+w*w;
if(MODEL == "BGK"){
	//per-direction stride is the per-GPU inner slab size (ZDIM/GPU_N-2 = zInner), matching the
	//device allocation and the moment-based branch below
	f[j+0 *pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/3.0f*(rho-1.5f*usqr);
	f[j+1 *pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
	f[j+2 *pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
	f[j+3 *pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
	f[j+4 *pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
	f[j+5 *pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
	f[j+6 *pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
	f[j+7 *pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
	f[j+8 *pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
	f[j+9 *pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
	f[j+10*pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
	f[j+11*pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
	f[j+12*pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
	f[j+13*pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
	f[j+14*pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
	f[j+15*pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
	f[j+16*pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
	f[j+17*pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
	f[j+18*pitch*YDIM*(ZDIM/GPU_N-2)]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
else{
float f0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float f1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float f2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float f3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float f4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float f5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float f6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float f7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float f8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float f9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float f10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float f11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float f12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float f13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float f14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float f15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float f16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float f17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float f18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
f1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
f14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
f17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f[j+0 *pitch*YDIM*(ZDIM/GPU_N-2)]=f0 ;
f[j+1 *pitch*YDIM*(ZDIM/GPU_N-2)]=f1 ;
f[j+2 *pitch*YDIM*(ZDIM/GPU_N-2)]=f2 ;
f[j+3 *pitch*YDIM*(ZDIM/GPU_N-2)]=f3 ;
f[j+4 *pitch*YDIM*(ZDIM/GPU_N-2)]=f4 ;
f[j+5 *pitch*YDIM*(ZDIM/GPU_N-2)]=f5 ;
f[j+6 *pitch*YDIM*(ZDIM/GPU_N-2)]=f6 ;
f[j+7 *pitch*YDIM*(ZDIM/GPU_N-2)]=f7 ;
f[j+8 *pitch*YDIM*(ZDIM/GPU_N-2)]=f8 ;
f[j+9 *pitch*YDIM*(ZDIM/GPU_N-2)]=f9 ;
f[j+10*pitch*YDIM*(ZDIM/GPU_N-2)]=f10;
f[j+11*pitch*YDIM*(ZDIM/GPU_N-2)]=f11;
f[j+12*pitch*YDIM*(ZDIM/GPU_N-2)]=f12;
f[j+13*pitch*YDIM*(ZDIM/GPU_N-2)]=f13;
f[j+14*pitch*YDIM*(ZDIM/GPU_N-2)]=f14;
f[j+15*pitch*YDIM*(ZDIM/GPU_N-2)]=f15;
f[j+16*pitch*YDIM*(ZDIM/GPU_N-2)]=f16;
f[j+17*pitch*YDIM*(ZDIM/GPU_N-2)]=f17;
f[j+18*pitch*YDIM*(ZDIM/GPU_N-2)]=f18;
}
if(x == XDIM-1){
for(int i = XDIM; i<pitch; i++){
j = i+y*pitch+z*YDIM*pitch;//index on padded mem (pitch in elements)
		//use the same per-GPU per-direction stride as above when zeroing the pitch padding
		f[j+0 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+1 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+2 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+3 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+4 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+5 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+6 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+7 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+8 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+9 *pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+10*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+11*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+12*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+13*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+14*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+15*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+16*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+17*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
		f[j+18*pitch*YDIM*(ZDIM/GPU_N-2)]=0.f;
}
}
}
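//initialize_buffer applies the same uniform initial state to a single z-slice (the g or h halo
//buffer); the per-direction stride is pitch*YDIM because the buffer holds only one layer.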
__global__ void initialize_buffer(float *g, size_t pitch)//pitch in elements
{
int x = threadIdx.x+blockIdx.x*blockDim.x;//coord in linear mem
int y = threadIdx.y+blockIdx.y*blockDim.y;
int j = x+y*pitch;//index on padded mem (pitch in elements)
float u,v,w,rho,usqr;
rho = 1.f;
u = 0.05f;
v = UMAX;
w = 0.0f;
usqr = u*u+v*v+w*w;
if(MODEL == "BGK"){
g[j+0 *pitch*YDIM]= 1.0f/3.0f*(rho-1.5f*usqr);
g[j+1 *pitch*YDIM]= 1.0f/18.0f*(rho+3.0f*u+4.5f*u*u-1.5f*usqr);
g[j+2 *pitch*YDIM]= 1.0f/18.0f*(rho+3.0f*v+4.5f*v*v-1.5f*usqr);
g[j+3 *pitch*YDIM]= 1.0f/18.0f*(rho-3.0f*u+4.5f*u*u-1.5f*usqr);
g[j+4 *pitch*YDIM]= 1.0f/18.0f*(rho-3.0f*v+4.5f*v*v-1.5f*usqr);
g[j+5 *pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(u+v)+4.5f*(u+v)*(u+v)-1.5f*usqr);
g[j+6 *pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-u+v)+4.5f*(-u+v)*(-u+v)-1.5f*usqr);
g[j+7 *pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-u-v)+4.5f*(-u-v)*(-u-v)-1.5f*usqr);
g[j+8 *pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(u-v)+4.5f*(u-v)*(u-v)-1.5f*usqr);
g[j+9 *pitch*YDIM]= 1.0f/18.0f*(rho+3.0f*w+4.5f*w*w-1.5f*usqr);
g[j+10*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(u+w)+4.5f*(u+w)*(u+w)-1.5f*usqr);
g[j+11*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(v+w)+4.5f*(v+w)*(v+w)-1.5f*usqr);
g[j+12*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-u+w)+4.5f*(-u+w)*(-u+w)-1.5f*usqr);
g[j+13*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-v+w)+4.5f*(-v+w)*(-v+w)-1.5f*usqr);
g[j+14*pitch*YDIM]= 1.0f/18.0f*(rho-3.0f*w+4.5f*w*w-1.5f*usqr);
g[j+15*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(u-w)+4.5f*(u-w)*(u-w)-1.5f*usqr);
g[j+16*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(v-w)+4.5f*(v-w)*(v-w)-1.5f*usqr);
g[j+17*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-u-w)+4.5f*(-u-w)*(-u-w)-1.5f*usqr);
g[j+18*pitch*YDIM]= 1.0f/36.0f*(rho+3.0f*(-v-w)+4.5f*(-v-w)*(-v-w)-1.5f*usqr);
}
else{
float f0 = 0.1904761791f*rho+-0.597127747f*usqr ;
float f1 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*u ;
float f2 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*v ;
float f3 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*u ;
float f4 = 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*v ;
float f5 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+v) ;
float f6 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-v) ;
float f7 = 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u+v) ;
float f8 = 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u-v) ;
float f9 = 0.1031746045f*rho+ 0.032375918f*usqr+ 0.1666666667f*w;
float f10= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( u+w) ;
float f11= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*( v+w);
float f12= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( u-w) ;
float f13= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*( v-w);
float f14= 0.1031746045f*rho+ 0.032375918f*usqr+ -0.1666666667f*w;
float f15= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(u-w) ;
float f16= 0.0158730149f*rho+ 0.033572690f*usqr+ 0.083333333f*(v-w);
float f17= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(u+w) ;
float f18= 0.0158730149f*rho+ 0.033572690f*usqr+ -0.083333333f*(v+w);
f1 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f2 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f3 += 0.055555556f*(2.f*u*u-(v*v+w*w));
f4 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w);
f5 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f6 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f7 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ 0.25f*u*v ;
f8 += 0.027777778f*(2.f*u*u-(v*v+w*w))+ 0.083333333f*(v*v-w*w)+ -0.25f*u*v ;
f9 += -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f10+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f11+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
f12+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f13+= -0.055555556f*(2.f*u*u-(v*v+w*w)) -0.25f*v*w ;
f14+= -0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) ;
f15+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + -0.25f*u*w;
f16+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + -0.25f*v*w ;
f17+= 0.027777778f*(2.f*u*u-(v*v+w*w))+ -0.083333333f*(v*v-w*w) + 0.25f*u*w;
f18+= -0.055555556f*(2.f*u*u-(v*v+w*w)) + 0.25f*v*w ;
g[j+0 *pitch*YDIM]=f0 ;
g[j+1 *pitch*YDIM]=f1 ;
g[j+2 *pitch*YDIM]=f2 ;
g[j+3 *pitch*YDIM]=f3 ;
g[j+4 *pitch*YDIM]=f4 ;
g[j+5 *pitch*YDIM]=f5 ;
g[j+6 *pitch*YDIM]=f6 ;
g[j+7 *pitch*YDIM]=f7 ;
g[j+8 *pitch*YDIM]=f8 ;
g[j+9 *pitch*YDIM]=f9 ;
g[j+10*pitch*YDIM]=f10;
g[j+11*pitch*YDIM]=f11;
g[j+12*pitch*YDIM]=f12;
g[j+13*pitch*YDIM]=f13;
g[j+14*pitch*YDIM]=f14;
g[j+15*pitch*YDIM]=f15;
g[j+16*pitch*YDIM]=f16;
g[j+17*pitch*YDIM]=f17;
g[j+18*pitch*YDIM]=f18;
}
}
//zMin = minimum zcoord, zNum = number of nodes in z
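//For every node this reconstructs rho, u, v, w from the 19 populations (plus some non-equilibrium
//moments and a strain-rate magnitude Smag that is computed but not currently written), then writes
//one point per line: x, y, z, u, v, w, rho and a few raw populations for inspection.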
void WriteResults(float *f, ofstream &output, float omega, int zMin, int zNum)
{
for(int k = 0; k<zNum; k++){
for(int i = 0; i<YDIM; i++){
for(int j = 0; j<XDIM; j++){
			int index = j+i*XDIM+k*XDIM*YDIM;//include the z-slice offset so each k layer reads its own nodes
float f0 = f[index+XDIM*YDIM*zNum*0 ];
float f1 = f[index+XDIM*YDIM*zNum*1 ];
float f2 = f[index+XDIM*YDIM*zNum*2 ];
float f3 = f[index+XDIM*YDIM*zNum*3 ];
float f4 = f[index+XDIM*YDIM*zNum*4 ];
float f5 = f[index+XDIM*YDIM*zNum*5 ];
float f6 = f[index+XDIM*YDIM*zNum*6 ];
float f7 = f[index+XDIM*YDIM*zNum*7 ];
float f8 = f[index+XDIM*YDIM*zNum*8 ];
float f9 = f[index+XDIM*YDIM*zNum*9 ];
float f10= f[index+XDIM*YDIM*zNum*10];
float f11= f[index+XDIM*YDIM*zNum*11];
float f12= f[index+XDIM*YDIM*zNum*12];
float f13= f[index+XDIM*YDIM*zNum*13];
float f14= f[index+XDIM*YDIM*zNum*14];
float f15= f[index+XDIM*YDIM*zNum*15];
float f16= f[index+XDIM*YDIM*zNum*16];
float f17= f[index+XDIM*YDIM*zNum*17];
float f18= f[index+XDIM*YDIM*zNum*18];
float rho = f0+f1+f2+f3+f4+f5+f6+f7+f8+f9;
rho +=f10+f11+f12+f13+f14+f15+f16+f17+f18;
float u = f1-f3+f5-f6-f7+f8+f10-f12+f15-f17;
float v = f2-f4+f5+f6-f7-f8+f11-f13+f16-f18;
float w = f9+f10+f11+f12+f13-f14-f15-f16-f17-f18;
			float m1 = 19.f*(-f0 + f5+f6+f7+f8 + f10+f11+f12+f13 + f15+f16+f17+f18 - (u*u+v*v+w*w));
			float m9 = 2.f*f1 - f2 + 2.f*f3 - f4 + f5 + f6 + f7 + f8 - f9 + f10 - 2.f*f11 + f12 - 2.f*f13 - f14 + f15 - 2.f*f16 + f17 - 2.f*f18 - (2.f*u*u-(v*v+w*w));
			float m11 = f2 + f4 + f5 + f6 + f7 + f8 - f9 - f10 - f12 - f14 - f15 - f17 - (v*v-w*w);
			float m13 = f5 - f6 + f7 - f8 - u*v;
			float m14 = f11 - f13 - f16 + f18 - v*w;
			float m15 = f10 - f12 - f15 + f17 - u*w;
float PI11 = -0.026315789f*m1-0.5f *omega*m9;
float PI22 = -0.026315789f*m1+0.25f*omega*(m9-3.0f*m11);
float PI33 = -0.026315789f*m1+0.25f*omega*(m9+3.0f*m11);
float PI12 = -1.5f*omega*m13;
float PI23 = -1.5f*omega*m14;
float PI13 = -1.5f*omega*m15;
//float nu0 = ((1.0f/omega)-0.5f)/3.0f;
float Smag = sqrt(2.f*(PI11*PI11+PI22*PI22+PI33*PI33+2.f*PI12*PI12+2.f*PI23*PI23+2.f*PI13*PI13));
output<<j<<", "<<i<<", "<<zMin+k<<", "<<u<<","<<v<<","<<w<<","<<rho<<","
//<<uAv_h[i]<<","<<vAv_h[i]<<", "<<ufluc_h[i]<<","<<vfluc_h[i]<<endl;
<<f0<<","<<f1<<", "<<f9<<","<<f18<<endl;
}}}
}
int main(int argc, char *argv[])
{
int GPU_N;
cudaGetDeviceCount(&GPU_N);
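	//override the detected device count and force a single-GPU run for this case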
GPU_N = 1;
cout<<"number of GPUs: "<<GPU_N<<endl;
//int *image_d, *image_h;
ofstream output;
ofstream output2;
string FileName = CASENAME;
//output.open ("LBM1_out.dat");
output.open ((FileName+".dat").c_str());
output2.open ((FileName+".force").c_str());
//size_t memsize, memsize2;
size_t pitch = 2;
while(pitch<XDIM)
pitch=pitch*2;
pitch = pitch*sizeof(float);
size_t pitch_elements = pitch/sizeof(float);
cout<<"Pitch (in elements): "<<pitch/sizeof(float)<<endl;
int i, nBlocks;
float omega, CharLength;
CharLength = OBSTR1*2.f;
omega = 1.0f/(3.0f*(UMAX*CharLength/RE)+0.5f);
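	//i.e. tau = 3*nu+0.5 with nu = UMAX*CharLength/RE; here nu = 0.08*8/100 = 0.0064, tau = 0.5192, omega ~ 1.926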
cout<<"omega : "<<omega<<endl;
cout<<"blocksize: "<<BLOCKSIZEX<<"x"<<BLOCKSIZEY<<"x"<<BLOCKSIZEZ<<endl;
cout<<"grid: "<<XDIM<<"x"<<YDIM<<"x"<<ZDIM<<endl;
cout<<"TMAX: "<<TMAX<<endl;
cout<<"Method: "<<METHOD<<endl;
cout<<"Model: "<<MODEL<<endl;
int zInner = ZDIM/GPU_N-2; //excluding halo
//int zGPU = ZDIM/GPU_N;//z nodes per GPU (including halo)
//nBlocks does not include the halo layers
nBlocks = ((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX)*((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY)
*((zInner+BLOCKSIZEZ-1)/BLOCKSIZEZ);
cout<<"nBlocks:"<<nBlocks<<endl;
dim3 threads(BLOCKSIZEX, BLOCKSIZEY, BLOCKSIZEZ);
//2 halo layers per GPU (for 2 GPUs)
dim3 grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),(zInner)/BLOCKSIZEZ);
dim3 g_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
dim3 h_grid(((XDIM+BLOCKSIZEX-1)/BLOCKSIZEX),((YDIM+BLOCKSIZEY-1)/BLOCKSIZEY),1);
cudaStream_t stream_halo[GPU_N];
cudaStream_t stream_inner[GPU_N];
//data pointers as 3D array (GPUxCoord)
float *f_inner_h[GPU_N], *g_h[GPU_N], *h_h[GPU_N];
float *f_inner_A_d[GPU_N], *g_A_d[GPU_N], *h_A_d[GPU_N];
float *f_inner_B_d[GPU_N], *g_B_d[GPU_N], *h_B_d[GPU_N];
float *g_temp[GPU_N], *h_temp[GPU_N];
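	//f_inner_* hold the zInner interior slices of each GPU's slab, g_* the bottom halo slice and
	//h_* the top halo slice; g_temp/h_temp receive the populations peer-copied from the neighbouring
	//GPUs each half step. Presumed element layout (matching f_mem): x + y*pitch + z*YDIM*pitch + q*pitch*YDIM*zInner.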
//Malloc and Initialize for each GPU
for(int n = 0; n<GPU_N; n++){
f_inner_h[n] = (float *)malloc(XDIM*YDIM*zInner*19*sizeof(float));
g_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
h_h [n] = (float *)malloc(XDIM*YDIM* 19*sizeof(float));
cudaSetDevice(n);
cudaStreamCreate(&stream_halo[n]);
cudaStreamCreate(&stream_inner[n]);
for(int m = 0; m<GPU_N; m++){
if(m != n)
cudaDeviceEnablePeerAccess(m,0);
}
cudaMalloc((void **) &f_inner_A_d[n], pitch*YDIM*zInner*19*sizeof(float));
cudaMalloc((void **) &f_inner_B_d[n], pitch*YDIM*zInner*19*sizeof(float));
cudaMalloc((void **) & g_A_d[n], pitch*YDIM* 19*sizeof(float));
cudaMalloc((void **) & g_B_d[n], pitch*YDIM* 19*sizeof(float));
cudaMalloc((void **) & h_A_d[n], pitch*YDIM* 19*sizeof(float));
cudaMalloc((void **) & h_B_d[n], pitch*YDIM* 19*sizeof(float));
cudaMalloc((void **) & g_temp[n], pitch*YDIM* 19*sizeof(float));
cudaMalloc((void **) & h_temp[n], pitch*YDIM* 19*sizeof(float));
//initialize host f_inner
for (i = 0; i < XDIM*YDIM*zInner*19; i++)
f_inner_h[n][i] = 0;
//initialize host g,h
for (i = 0; i < XDIM*YDIM*19; i++){
g_h[n][i] = 0;
h_h[n][i] = 0;
}
cudaMemcpy2D(f_inner_A_d[n],pitch,f_inner_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D(f_inner_B_d[n],pitch,f_inner_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyHostToDevice);
cudaMemcpy2D( g_A_d[n],pitch, g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( g_B_d[n],pitch, g_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( h_A_d[n],pitch, h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,cudaMemcpyHostToDevice);
cudaMemcpy2D( h_B_d[n],pitch, h_h[n],XDIM*sizeof(float),XDIM*sizeof(float),YDIM* 19,cudaMemcpyHostToDevice);
initialize_single<<<grid , threads>>>(f_inner_A_d[n],pitch_elements,GPU_N);
initialize_single<<<grid , threads>>>(f_inner_B_d[n],pitch_elements,GPU_N);
initialize_buffer<<<g_grid, threads>>>( g_A_d[n],pitch_elements);
initialize_buffer<<<g_grid, threads>>>( g_B_d[n],pitch_elements);
initialize_buffer<<<g_grid, threads>>>( h_A_d[n],pitch_elements);
initialize_buffer<<<g_grid, threads>>>( h_B_d[n],pitch_elements);
}//end Malloc and Initialize
struct timeval tdr0,tdr1;
double restime;
cudaDeviceSynchronize();
gettimeofday (&tdr0,NULL);
//Time loop
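	//Each iteration performs two half steps that ping-pong between the A and B copies: the five
	//z-crossing populations are first exchanged between neighbouring GPUs on stream_halo, then the
	//interior update runs on stream_inner while the two halo slices are updated on stream_halo.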
for(int t = 0; t<TMAX; t+=2){
//A->B
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaMemcpyPeerAsync(&h_temp[n][pitch_elements*YDIM*14],n,&g_A_d[ (n+1)%GPU_N][pitch_elements*YDIM*14], (n+1)%GPU_N,pitch_elements*YDIM*sizeof(float)*5,stream_halo[n]);
cudaMemcpyPeerAsync(&g_temp[n][pitch_elements*YDIM*9 ],n,&h_A_d[abs(n-1)%GPU_N][pitch_elements*YDIM*9 ],abs(n-1)%GPU_N,pitch_elements*YDIM*sizeof(float)*5,stream_halo[n]);
cudaStreamSynchronize(stream_halo[n]);
update_inner <<< grid, threads, 0, stream_inner[n]>>>(f_inner_A_d[n],f_inner_B_d[n], g_A_d[n], h_A_d[n],omega,pitch_elements,n,zInner);
update_top <<<h_grid, threads, 0, stream_halo [n]>>>( h_A_d[n], h_B_d[n],f_inner_A_d[n],h_temp[n],omega,pitch_elements,n,zInner);
update_bottom<<<h_grid, threads, 0, stream_halo [n]>>>( g_A_d[n], g_B_d[n],f_inner_A_d[n],g_temp[n],omega,pitch_elements,n,zInner);
}
cudaDeviceSynchronize();
//B->A
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaMemcpyPeerAsync(&h_temp[n][pitch_elements*YDIM*14],n,&g_B_d[ (n+1)%GPU_N][pitch_elements*YDIM*14], (n+1)%GPU_N,pitch_elements*YDIM*sizeof(float)*5,stream_halo[n]);
cudaMemcpyPeerAsync(&g_temp[n][pitch_elements*YDIM*9 ],n,&h_B_d[abs(n-1)%GPU_N][pitch_elements*YDIM*9 ],abs(n-1)%GPU_N,pitch_elements*YDIM*sizeof(float)*5,stream_halo[n]);
cudaStreamSynchronize(stream_halo[n]);
update_inner <<< grid, threads, 0, stream_inner[n]>>>(f_inner_B_d[n],f_inner_A_d[n], g_B_d[n], h_B_d[n],omega,pitch_elements,n,zInner);
update_top <<<h_grid, threads, 0, stream_halo [n]>>>( h_B_d[n], h_A_d[n],f_inner_B_d[n],h_temp[n],omega,pitch_elements,n,zInner);
update_bottom<<<h_grid, threads, 0, stream_halo [n]>>>( g_B_d[n], g_A_d[n],f_inner_B_d[n],g_temp[n],omega,pitch_elements,n,zInner);
}
cudaDeviceSynchronize();
}//end Time loop
cudaDeviceSynchronize();
gettimeofday (&tdr1,NULL);
timeval_subtract (&restime, &tdr1, &tdr0);
int Nodes;
Nodes = XDIM*YDIM*ZDIM;
cout<<"Time taken for main kernel: "<<restime<<" ("
<<double(Nodes*double(TMAX/1000000.f))/restime<<"MLUPS)\n";
output<<"VARIABLES = \"X\",\"Y\",\"Z\",\"u\",\"v\",\"w\",\"rho\",\"uAv\",\"vAv\",\"ufluc\",\"vfluc\"\n";
output<<"ZONE F=POINT, I="<<XDIM<<", J="<<YDIM<<", K="<<ZDIM<<"\n";
//D2H Memcpy and write results
for(int n = 0; n<GPU_N; n++){
cudaSetDevice(n);
cudaMemcpy2D(f_inner_h[n],XDIM*sizeof(float),f_inner_A_d[n],pitch,XDIM*sizeof(float),YDIM*zInner*19,cudaMemcpyDeviceToHost);
cudaMemcpy2D( g_h[n],XDIM*sizeof(float), g_A_d[n],pitch,XDIM*sizeof(float),YDIM* 19,cudaMemcpyDeviceToHost);
cudaMemcpy2D( h_h[n],XDIM*sizeof(float), h_A_d[n],pitch,XDIM*sizeof(float),YDIM* 19,cudaMemcpyDeviceToHost);
//Write results
WriteResults( g_h[n],output,omega,ZDIM/GPU_N*n ,1 );
WriteResults(f_inner_h[n],output,omega,ZDIM/GPU_N*n+1 ,zInner);
WriteResults( h_h[n],output,omega,ZDIM/GPU_N*(n+1)-1,1 );
cudaFree(f_inner_A_d[n]);
cudaFree(f_inner_B_d[n]);
cudaFree( g_A_d[n]);
cudaFree( g_B_d[n]);
cudaFree( h_A_d[n]);
cudaFree( h_B_d[n]);
cudaFree( g_temp[n]);
cudaFree( h_temp[n]);
}//end write results
return(0);
}
|
7b7c8be9d8be9e9365863aefe4ac49177345c645.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,int var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12) {
if (comp >= (var_1 - +0.0f)) {
if (comp >= (var_2 - var_3)) {
comp += +0.0f + var_5;
float tmp_1 = (var_6 / logf((var_7 - (var_8 + -1.1764E-16f))));
comp = tmp_1 + (var_9 - (+0.0f + (-1.3204E34f * (-1.3519E-42f - (-1.6717E34f / var_10)))));
for (int i=0; i < var_4; ++i) {
comp = logf((+1.9378E-36f + (var_11 * (var_12 - -1.3730E-42f))));
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
int tmp_5 = atoi(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
hipLaunchKernelGGL(( compute), dim3(1),dim3(1), 0, 0, tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13);
hipDeviceSynchronize();
return 0;
}
| 7b7c8be9d8be9e9365863aefe4ac49177345c645.cu |
/* This is a automatically generated test. Do not modify */
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
__global__
void compute(float comp, float var_1,float var_2,float var_3,int var_4,float var_5,float var_6,float var_7,float var_8,float var_9,float var_10,float var_11,float var_12) {
if (comp >= (var_1 - +0.0f)) {
if (comp >= (var_2 - var_3)) {
comp += +0.0f + var_5;
float tmp_1 = (var_6 / logf((var_7 - (var_8 + -1.1764E-16f))));
comp = tmp_1 + (var_9 - (+0.0f + (-1.3204E34f * (-1.3519E-42f - (-1.6717E34f / var_10)))));
for (int i=0; i < var_4; ++i) {
comp = logf((+1.9378E-36f + (var_11 * (var_12 - -1.3730E-42f))));
}
}
}
printf("%.17g\n", comp);
}
float* initPointer(float v) {
float *ret = (float*) malloc(sizeof(float)*10);
for(int i=0; i < 10; ++i)
ret[i] = v;
return ret;
}
int main(int argc, char** argv) {
/* Program variables */
float tmp_1 = atof(argv[1]);
float tmp_2 = atof(argv[2]);
float tmp_3 = atof(argv[3]);
float tmp_4 = atof(argv[4]);
int tmp_5 = atoi(argv[5]);
float tmp_6 = atof(argv[6]);
float tmp_7 = atof(argv[7]);
float tmp_8 = atof(argv[8]);
float tmp_9 = atof(argv[9]);
float tmp_10 = atof(argv[10]);
float tmp_11 = atof(argv[11]);
float tmp_12 = atof(argv[12]);
float tmp_13 = atof(argv[13]);
compute<<<1,1>>>(tmp_1,tmp_2,tmp_3,tmp_4,tmp_5,tmp_6,tmp_7,tmp_8,tmp_9,tmp_10,tmp_11,tmp_12,tmp_13);
cudaDeviceSynchronize();
return 0;
}
|
5b57e274aee67c59aee8d0897f3d38c37ed36ac2.hip | // !!! This is a file automatically generated by hipify!!!
/* This code accompanies
* Two relaxation time lattice Boltzmann method coupled to fast Fourier transform Poisson solver: Application to electroconvective flow, Journal of Computational Physics
* https://doi.org/10.1016/j.jcp.2019.07.029
* Numerical analysis of electroconvection in cross-flow with unipolar charge injection, Physical Review Fluids
* https://doi.org/10.1103/PhysRevFluids.4.103701
*
* Yifei Guan, Igor Novosselov
* University of Washington
*
* Author: Yifei Guan
*
*/
#define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <stdlib.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hipfft.h>
#include "LBM.h"
#include <hip/device_functions.h>
#define RAD 1
__global__ void gpu_poisson(double*, double*,double*);
__global__ void gpu_efield(double*, double*, double*);
__global__ void odd_extension(double*, hipfftDoubleComplex*);
__global__ void gpu_derivative(double*, double*, hipfftDoubleComplex*);
__global__ void odd_extract(double*, hipfftDoubleComplex*);
__global__ void gpu_bc(double*);
__device__ __forceinline__ size_t gpu_s_scalar_index(unsigned int x, unsigned int y)
{
return (2*RAD + nThreads)*y + x;
}
__host__
void poisson_phi(double *charge_gpu, double *phi_gpu)
{
// blocks in grid
dim3 grid(NX / nThreads, NY, 1);
// threads in block
dim3 threads(nThreads, 1, 1);
unsigned int it = 0;
double MAX_ITERATIONS = 1.0E6;
double TOLERANCE = 1.0e-9;
double *Res = (double*)malloc(mem_size_scalar);
double error = 0.0;
double *R;
checkCudaErrors(hipMalloc((void**)&R, mem_size_scalar));
for (it = 0; it < MAX_ITERATIONS; ++it) {
error = 0.0;
gpu_poisson << < grid, threads >> > (charge_gpu, phi_gpu, R);
checkCudaErrors(hipMemcpy(Res, R, mem_size_scalar, hipMemcpyDeviceToHost));
for (unsigned int y = 0; y < NY; ++y) {
for (unsigned int x = 0; x < NX; ++x) {
//if (it % 1000 == 1) printf("%g\n", error);
if (error < Res[scalar_index(x, y)]) error = Res[scalar_index(x, y)];
}
}
if (error < TOLERANCE) break;
}
checkCudaErrors(hipFree(R));
free(Res);
//printf("%g\n", error);
if (it == MAX_ITERATIONS) {
printf("Poisson solver did not converge!\n");
printf("Residual = %g\n", error);
system("pause");
//exit(-1);
}
getLastCudaError("Poisson solver kernel error");
}
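// gpu_poisson performs one Jacobi-style sweep per call: each block stages a (nThreads+2*RAD) x 3 tile of
// phi in shared memory, applies phi = 0.25*(phiL+phiR+phiU+phiD + dx*dx*charge/eps) at interior nodes,
// enforces phi = voltage at y=0 and phi = 0 at y=NY-1, and stores |phi_new - phi_old| in R so the host
// loop above can test convergence.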
__global__ void gpu_poisson(double *c, double *fi,double *R){
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int s_y = threadIdx.y + RAD;
unsigned int s_x = threadIdx.x + RAD;
unsigned int xp1 = (x + blockDim.x) % NX;
unsigned int yp1 = (y + blockDim.y) % NY;
unsigned int xm1 = (NX + x - 1) % NX;
unsigned int ym1 = (NY + y - 1) % NY;
__shared__ double s_in[(2*RAD + nThreads)*3];
// load to shared memory (regular cells)
s_in[gpu_s_scalar_index(s_x,s_y)] = fi[gpu_scalar_index(x, y)];
// load halo cells
if (threadIdx.x < RAD) {
s_in[gpu_s_scalar_index(s_x - RAD, s_y)] = fi[gpu_scalar_index(xm1, y)];
s_in[gpu_s_scalar_index(s_x + blockDim.x, s_y)] = fi[gpu_scalar_index(xp1, y)];
}
if (threadIdx.y < RAD) {
s_in[gpu_s_scalar_index(s_x, s_y - RAD)] = fi[gpu_scalar_index(x, ym1)];
s_in[gpu_s_scalar_index(s_x, s_y + blockDim.y)] = fi[gpu_scalar_index(x, yp1)];
}
// Boundary conditions
if (y == 0) {
fi[gpu_scalar_index(x, y)] = voltage;
return;
}
if (y == NY - 1) {
fi[gpu_scalar_index(x, y)] = 0.0;
return;
}
__syncthreads();
double charge = c[gpu_scalar_index(x, y)];
//double phi = fi[gpu_scalar_index(x, y)];
//double phiL = fi[gpu_scalar_index(xm1, y)];
//double phiR = fi[gpu_scalar_index(xp1, y)];
//double phiU = fi[gpu_scalar_index(x, yp1)];
//double phiD = fi[gpu_scalar_index(x, ym1)];
double phi = s_in[gpu_s_scalar_index(s_x, s_y)];
double phiL = s_in[gpu_s_scalar_index(s_x-1, s_y)];
double phiR = s_in[gpu_s_scalar_index(s_x+1, s_y)];
double phiU = s_in[gpu_s_scalar_index(s_x, s_y+1)];
double phiD = s_in[gpu_s_scalar_index(s_x, s_y-1)];
double source = (charge / eps) * dx *dx; // Right hand side of the equation
double phi_old = phi;
phi = 0.25 * (phiL + phiR + phiU + phiD + source);
// Record the error
R[gpu_scalar_index(x, y)] = fabs(phi - phi_old);
//__syncthreads();
fi[gpu_scalar_index(x, y)] = phi;
//if (x == 5 && y == 5) printf("%g\n", phi);
}
__host__
void efield(double *phi_gpu, double *Ex_gpu, double *Ey_gpu) {
// blocks in grid
dim3 grid(NX / nThreads, NY, 1);
// threads in block
dim3 threads(nThreads, 1, 1);
gpu_efield << < grid, threads >> > (phi_gpu, Ex_gpu, Ey_gpu);
gpu_bc << <grid, threads >> > (Ey_gpu);
getLastCudaError("Efield kernel error");
}
__global__ void gpu_efield(double *fi, double *ex, double *ey){
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int xp1 = (x + 1) % NX;
unsigned int yp1 = (y + 1) % NY;
unsigned int xm1 = (NX + x - 1) % NX;
unsigned int ym1 = (NY + y - 1) % NY;
double phi = fi[gpu_scalar_index(x, y)];
double phiL = fi[gpu_scalar_index(xm1, y)];
double phiR = fi[gpu_scalar_index(xp1, y)];
double phiU = fi[gpu_scalar_index(x, yp1)];
double phiD = fi[gpu_scalar_index(x, ym1)];
ex[gpu_scalar_index(x, y)] = 0.5*(phiL - phiR) / dx;
ey[gpu_scalar_index(x, y)] = 0.5*(phiD - phiU) / dy;
}
__global__ void gpu_bc(double *ey) {
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
if (y == 0) {
//ex[gpu_scalar_index(x, 0)] = ex[gpu_scalar_index(x, 1)];
ey[gpu_scalar_index(x, 0)] = ey[gpu_scalar_index(x, 1)];
return;
}
if (y == NY - 1) {
//ex[gpu_scalar_index(x, NY - 1)] = ex[gpu_scalar_index(x, NY - 2)];
ey[gpu_scalar_index(x, NY - 1)] = ey[gpu_scalar_index(x, NY - 2)];
return;
}
}
// =========================================================================
// Fast poisson solver domain extension
// =========================================================================
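// The Dirichlet problem in y is handled by odd extension: the source is extended to NE rows so that a
// periodic transform (presumably the FFT plans set up elsewhere) enforces phi = 0 at the walls, with the
// electrode voltage folded into the source at the first and last interior rows. In transform space each
// mode is divided by mu = (4/dy^2)*sin^2(ky*dy/2) + kx^2, and odd_extract recovers phi on the original NY rows.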
__host__ void extension(double *c, hipfftDoubleComplex *c_ext) {
// blocks in grid
dim3 grid(NX / nThreads, NE, 1);
// threads in block
dim3 threads(nThreads, 1, 1);
odd_extension << < grid, threads >> > (c, c_ext);
getLastCudaError("Odd Extension error");
}
__global__ void odd_extension(double *charge, hipfftDoubleComplex *charge_ext) {
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
if (y == 0) {
charge_ext[gpu_scalar_index(x, y)].x = 0.0;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
if (y == 1) {
charge_ext[gpu_scalar_index(x, y)].x = -charge[gpu_scalar_index(x, y)] / eps - voltage / dy / dy;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
if (y > 1 && y < NY - 1) {
charge_ext[gpu_scalar_index(x, y)].x = -charge[gpu_scalar_index(x, y)] / eps;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
if (y == NY - 1) {
charge_ext[gpu_scalar_index(x, y)].x = 0.0;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
if (y > NY - 1 && y<NE-1) {
charge_ext[gpu_scalar_index(x, y)].x = charge[gpu_scalar_index(x, NE - y)] / eps;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
if (y == NE - 1) {
charge_ext[gpu_scalar_index(x, y)].x = charge[gpu_scalar_index(x, 1)] / eps + voltage / dy / dy;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
}
__host__ void derivative(double *kx, double *ky, hipfftDoubleComplex *source) {
// blocks in grid
dim3 grid(NX / nThreads, NE, 1);
// threads in block
dim3 threads(nThreads, 1, 1);
gpu_derivative << < grid, threads >> > (kx, ky, source);
getLastCudaError("Gpu derivative error");
}
__global__ void gpu_derivative(double *kx, double *ky, hipfftDoubleComplex *source) {
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
double I = kx[x];
double J = ky[y];
double mu = (4.0 / dy / dy)*(sin(J*dy*0.5)*sin(J*dy*0.5)) + I*I;
if (y == 0 && x == 0) mu = 1.0;
source[gpu_scalar_index(x, y)].x = -source[gpu_scalar_index(x, y)].x / mu;
source[gpu_scalar_index(x, y)].y = -source[gpu_scalar_index(x, y)].y / mu;
}
__host__ void extract(double *fi, hipfftDoubleComplex *fi_ext) {
// blocks in grid
dim3 grid(NX / nThreads, NY, 1);
// threads in block
dim3 threads(nThreads, 1, 1);
odd_extract << < grid, threads >> > (fi, fi_ext);
getLastCudaError("Odd Extension error");
}
__global__ void odd_extract(double *phi, hipfftDoubleComplex *phi_ext) {
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
if (y == 0) {
phi[gpu_scalar_index(x, y)] = voltage;
return;
}
if (y == NY-1) {
phi[gpu_scalar_index(x, y)] = 0.0;
return;
}
phi[gpu_scalar_index(x, y)] = phi_ext[gpu_scalar_index(x, y)].x/SIZE;
}
| 5b57e274aee67c59aee8d0897f3d38c37ed36ac2.cu | /* This code accompanies
* Two relaxation time lattice Boltzmann method coupled to fast Fourier transform Poisson solver: Application to electroconvective flow, Journal of Computational Physics
* https://doi.org/10.1016/j.jcp.2019.07.029
* Numerical analysis of electroconvection in cross-flow with unipolar charge injection, Physical Review Fluids
* https://doi.org/10.1103/PhysRevFluids.4.103701
*
* Yifei Guan, Igor Novosselov
* University of Washington
*
* Author: Yifei Guan
*
*/
#define _CRT_SECURE_NO_WARNINGS
#include <stdio.h>
#include <stdlib.h>
#define _USE_MATH_DEFINES
#include <math.h>
#include <cuda_runtime.h>
#include <cuda.h>
#include <cufft.h>
#include "LBM.h"
#include <device_functions.h>
#define RAD 1
__global__ void gpu_poisson(double*, double*,double*);
__global__ void gpu_efield(double*, double*, double*);
__global__ void odd_extension(double*, cufftDoubleComplex*);
__global__ void gpu_derivative(double*, double*, cufftDoubleComplex*);
__global__ void odd_extract(double*, cufftDoubleComplex*);
__global__ void gpu_bc(double*);
__device__ __forceinline__ size_t gpu_s_scalar_index(unsigned int x, unsigned int y)
{
return (2*RAD + nThreads)*y + x;
}
__host__
void poisson_phi(double *charge_gpu, double *phi_gpu)
{
// blocks in grid
dim3 grid(NX / nThreads, NY, 1);
// threads in block
dim3 threads(nThreads, 1, 1);
unsigned int it = 0;
double MAX_ITERATIONS = 1.0E6;
double TOLERANCE = 1.0e-9;
double *Res = (double*)malloc(mem_size_scalar);
double error = 0.0;
double *R;
checkCudaErrors(cudaMalloc((void**)&R, mem_size_scalar));
for (it = 0; it < MAX_ITERATIONS; ++it) {
error = 0.0;
gpu_poisson << < grid, threads >> > (charge_gpu, phi_gpu, R);
checkCudaErrors(cudaMemcpy(Res, R, mem_size_scalar, cudaMemcpyDeviceToHost));
for (unsigned int y = 0; y < NY; ++y) {
for (unsigned int x = 0; x < NX; ++x) {
//if (it % 1000 == 1) printf("%g\n", error);
if (error < Res[scalar_index(x, y)]) error = Res[scalar_index(x, y)];
}
}
if (error < TOLERANCE) break;
}
checkCudaErrors(cudaFree(R));
free(Res);
//printf("%g\n", error);
if (it == MAX_ITERATIONS) {
printf("Poisson solver did not converge!\n");
printf("Residual = %g\n", error);
system("pause");
//exit(-1);
}
getLastCudaError("Poisson solver kernel error");
}
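// gpu_poisson performs one Jacobi-style sweep per call: each block stages a (nThreads+2*RAD) x 3 tile of
// phi in shared memory, applies phi = 0.25*(phiL+phiR+phiU+phiD + dx*dx*charge/eps) at interior nodes,
// enforces phi = voltage at y=0 and phi = 0 at y=NY-1, and stores |phi_new - phi_old| in R so the host
// loop above can test convergence.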
__global__ void gpu_poisson(double *c, double *fi,double *R){
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int s_y = threadIdx.y + RAD;
unsigned int s_x = threadIdx.x + RAD;
unsigned int xp1 = (x + blockDim.x) % NX;
unsigned int yp1 = (y + blockDim.y) % NY;
unsigned int xm1 = (NX + x - 1) % NX;
unsigned int ym1 = (NY + y - 1) % NY;
__shared__ double s_in[(2*RAD + nThreads)*3];
// load to shared memory (regular cells)
s_in[gpu_s_scalar_index(s_x,s_y)] = fi[gpu_scalar_index(x, y)];
// load halo cells
if (threadIdx.x < RAD) {
s_in[gpu_s_scalar_index(s_x - RAD, s_y)] = fi[gpu_scalar_index(xm1, y)];
s_in[gpu_s_scalar_index(s_x + blockDim.x, s_y)] = fi[gpu_scalar_index(xp1, y)];
}
if (threadIdx.y < RAD) {
s_in[gpu_s_scalar_index(s_x, s_y - RAD)] = fi[gpu_scalar_index(x, ym1)];
s_in[gpu_s_scalar_index(s_x, s_y + blockDim.y)] = fi[gpu_scalar_index(x, yp1)];
}
// Boundary conditions
if (y == 0) {
fi[gpu_scalar_index(x, y)] = voltage;
return;
}
if (y == NY - 1) {
fi[gpu_scalar_index(x, y)] = 0.0;
return;
}
__syncthreads();
double charge = c[gpu_scalar_index(x, y)];
//double phi = fi[gpu_scalar_index(x, y)];
//double phiL = fi[gpu_scalar_index(xm1, y)];
//double phiR = fi[gpu_scalar_index(xp1, y)];
//double phiU = fi[gpu_scalar_index(x, yp1)];
//double phiD = fi[gpu_scalar_index(x, ym1)];
double phi = s_in[gpu_s_scalar_index(s_x, s_y)];
double phiL = s_in[gpu_s_scalar_index(s_x-1, s_y)];
double phiR = s_in[gpu_s_scalar_index(s_x+1, s_y)];
double phiU = s_in[gpu_s_scalar_index(s_x, s_y+1)];
double phiD = s_in[gpu_s_scalar_index(s_x, s_y-1)];
double source = (charge / eps) * dx *dx; // Right hand side of the equation
double phi_old = phi;
phi = 0.25 * (phiL + phiR + phiU + phiD + source);
// Record the error
R[gpu_scalar_index(x, y)] = fabs(phi - phi_old);
//__syncthreads();
fi[gpu_scalar_index(x, y)] = phi;
//if (x == 5 && y == 5) printf("%g\n", phi);
}
__host__
void efield(double *phi_gpu, double *Ex_gpu, double *Ey_gpu) {
// blocks in grid
dim3 grid(NX / nThreads, NY, 1);
// threads in block
dim3 threads(nThreads, 1, 1);
gpu_efield << < grid, threads >> > (phi_gpu, Ex_gpu, Ey_gpu);
gpu_bc << <grid, threads >> > (Ey_gpu);
getLastCudaError("Efield kernel error");
}
__global__ void gpu_efield(double *fi, double *ex, double *ey){
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
unsigned int xp1 = (x + 1) % NX;
unsigned int yp1 = (y + 1) % NY;
unsigned int xm1 = (NX + x - 1) % NX;
unsigned int ym1 = (NY + y - 1) % NY;
double phi = fi[gpu_scalar_index(x, y)];
double phiL = fi[gpu_scalar_index(xm1, y)];
double phiR = fi[gpu_scalar_index(xp1, y)];
double phiU = fi[gpu_scalar_index(x, yp1)];
double phiD = fi[gpu_scalar_index(x, ym1)];
ex[gpu_scalar_index(x, y)] = 0.5*(phiL - phiR) / dx;
ey[gpu_scalar_index(x, y)] = 0.5*(phiD - phiU) / dy;
}
__global__ void gpu_bc(double *ey) {
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
if (y == 0) {
//ex[gpu_scalar_index(x, 0)] = ex[gpu_scalar_index(x, 1)];
ey[gpu_scalar_index(x, 0)] = ey[gpu_scalar_index(x, 1)];
return;
}
if (y == NY - 1) {
//ex[gpu_scalar_index(x, NY - 1)] = ex[gpu_scalar_index(x, NY - 2)];
ey[gpu_scalar_index(x, NY - 1)] = ey[gpu_scalar_index(x, NY - 2)];
return;
}
}
// =========================================================================
// Fast poisson solver domain extension
// =========================================================================
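// The Dirichlet problem in y is handled by odd extension: the source is extended to NE rows so that a
// periodic transform (presumably the FFT plans set up elsewhere) enforces phi = 0 at the walls, with the
// electrode voltage folded into the source at the first and last interior rows. In transform space each
// mode is divided by mu = (4/dy^2)*sin^2(ky*dy/2) + kx^2, and odd_extract recovers phi on the original NY rows.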
__host__ void extension(double *c, cufftDoubleComplex *c_ext) {
// blocks in grid
dim3 grid(NX / nThreads, NE, 1);
// threads in block
dim3 threads(nThreads, 1, 1);
odd_extension << < grid, threads >> > (c, c_ext);
getLastCudaError("Odd Extension error");
}
__global__ void odd_extension(double *charge, cufftDoubleComplex *charge_ext) {
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
if (y == 0) {
charge_ext[gpu_scalar_index(x, y)].x = 0.0;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
if (y == 1) {
charge_ext[gpu_scalar_index(x, y)].x = -charge[gpu_scalar_index(x, y)] / eps - voltage / dy / dy;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
if (y > 1 && y < NY - 1) {
charge_ext[gpu_scalar_index(x, y)].x = -charge[gpu_scalar_index(x, y)] / eps;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
if (y == NY - 1) {
charge_ext[gpu_scalar_index(x, y)].x = 0.0;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
if (y > NY - 1 && y<NE-1) {
charge_ext[gpu_scalar_index(x, y)].x = charge[gpu_scalar_index(x, NE - y)] / eps;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
if (y == NE - 1) {
charge_ext[gpu_scalar_index(x, y)].x = charge[gpu_scalar_index(x, 1)] / eps + voltage / dy / dy;
charge_ext[gpu_scalar_index(x, y)].y = 0.0;
return;
}
}
__host__ void derivative(double *kx, double *ky, cufftDoubleComplex *source) {
// blocks in grid
dim3 grid(NX / nThreads, NE, 1);
// threads in block
dim3 threads(nThreads, 1, 1);
gpu_derivative << < grid, threads >> > (kx, ky, source);
getLastCudaError("Gpu derivative error");
}
__global__ void gpu_derivative(double *kx, double *ky, cufftDoubleComplex *source) {
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
double I = kx[x];
double J = ky[y];
double mu = (4.0 / dy / dy)*(sin(J*dy*0.5)*sin(J*dy*0.5)) + I*I;
if (y == 0 && x == 0) mu = 1.0;
source[gpu_scalar_index(x, y)].x = -source[gpu_scalar_index(x, y)].x / mu;
source[gpu_scalar_index(x, y)].y = -source[gpu_scalar_index(x, y)].y / mu;
}
__host__ void extract(double *fi, cufftDoubleComplex *fi_ext) {
// blocks in grid
dim3 grid(NX / nThreads, NY, 1);
// threads in block
dim3 threads(nThreads, 1, 1);
odd_extract << < grid, threads >> > (fi, fi_ext);
getLastCudaError("Odd Extension error");
}
__global__ void odd_extract(double *phi, cufftDoubleComplex *phi_ext) {
unsigned int y = blockIdx.y;
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
if (y == 0) {
phi[gpu_scalar_index(x, y)] = voltage;
return;
}
if (y == NY-1) {
phi[gpu_scalar_index(x, y)] = 0.0;
return;
}
phi[gpu_scalar_index(x, y)] = phi_ext[gpu_scalar_index(x, y)].x/SIZE;
}
|
d2fc794b7701d73c1c202d6309c2c744325294dd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include "Vector3_hip.cuh"
#include "Ray.cuh"
//TODO: Cleanup to use Vector3 objects for (u,v,w) cam direction,
__global__ void generateRays(float * u, float * v, float * w, float * eyePoint, float * cameraDirection, float viewPlaneDistance, float horizontalRaySpacing, float verticalRaySpacing, mint hres, mint vres, mint pixelCount, mint orthographic, mint sampleCountRoot, float * randomValues, mint randomCount, Ray * rays) {
int pixelIndex = threadIdx.x + blockIdx.x*blockDim.x;
if (pixelIndex < pixelCount) {
float * etp = (float*)malloc(3 * sizeof(float)); // Vector from the Eye To Pixel of this ray
// Get the screen space coordinates (col,row)
int col = pixelIndex % hres;
        int row = pixelIndex / hres; // Integer division; rows are hres pixels wide, matching the col computation above
int sampleCount = sampleCountRoot * sampleCountRoot;
for (int sampleIndex = 0; sampleIndex < sampleCount; sampleIndex++) {
// Get the view space coordinates for the pixel (xv, yv, zv)
float xv = horizontalRaySpacing * (col - hres/2.0);
float yv = verticalRaySpacing * (row - vres/2.0);
float zv = -viewPlaneDistance; // Distance from the eye point to the view plane
// Offset the view space coordinates by random amount in the section specified by the current sample
int subCol = sampleIndex % sampleCountRoot;
int subRow = sampleIndex / sampleCountRoot; // Integer Division
float hSubSpacing = horizontalRaySpacing / sampleCountRoot;
float vSubSpacing = verticalRaySpacing / sampleCountRoot;
// float subSpacing = raySpacing / sampleCountRoot;
// TODO: Have the initial xv and yv not have the +0.5 to put in center. X DONE, keeping this in as reminder
xv += subCol * hSubSpacing;
yv += subRow * vSubSpacing;
// randX and randY are pre-computed random values between 0-1.
float randX = *(randomValues + ((pixelIndex + sampleIndex) % randomCount));
float randY = *(randomValues + ((pixelIndex + sampleIndex+13) % randomCount));
xv += hSubSpacing * randX;
yv += vSubSpacing * randY;
// Get the vectors from the eye to the pixel in world space
// using the orthonormal basis (u,v,w) to convert the view space coordinates.
// Vector3 eyeToPixel = Vector3( xv*u[0] + yv*v[0] + zv*w[0],
// xv*u[1] + yv*v[1] + zv*w[1],
// xv*u[2] + yv*v[2] + zv*w[2]
// );
etp[0] = xv*u[0] + yv*v[0] + zv*w[0];
etp[1] = xv*u[1] + yv*v[1] + zv*w[1];
etp[2] = xv*u[2] + yv*v[2] + zv*w[2];
// float * origin = rayOrigins + pixelIndex*3*sampleCount + sampleIndex*3;
// float * direction = rayDirections + pixelIndex*3*sampleCount + sampleIndex*3;
Ray * ray = rays + pixelIndex*sampleCount + sampleIndex;
Vector3 origin;
Vector3 direction;
if (orthographic) {
// Origins should be each individual pixel's position
// Directions should all be the camera direction
// Pixel position = eye point + eye-to-pixel vector
// origin[0] = eyePoint[0] + etp[0];
// origin[1] = eyePoint[1] + etp[1];
// origin[2] = eyePoint[2] + etp[2];
origin = Vector3();
origin.x = eyePoint[0] + etp[0];
origin.y = eyePoint[1] + etp[1];
origin.z = eyePoint[2] + etp[2];
// Direction = Camera Direciton
// direction[0] = cameraDirection[0];
// direction[1] = cameraDirection[1];
// direction[2] = cameraDirection[2];
direction = Vector3();
direction.x = cameraDirection[0];
direction.y = cameraDirection[1];
direction.z = cameraDirection[2];
} else {
// Origins are all the same (eye point)
// Directions are from the eye point to the pixel (normalized)
// Origin = Eye Point
// origin[0] = eyePoint[0];
// origin[1] = eyePoint[1];
// origin[2] = eyePoint[2];
origin = Vector3();
origin.x = eyePoint[0];
origin.y = eyePoint[1];
origin.z = eyePoint[2];
// Direction = Normalized Eye-To-Pixel Vector
float magnitude = sqrtf(etp[0]*etp[0] + etp[1]*etp[1] + etp[2]*etp[2]);
// direction[0] = etp[0] / magnitude;
// direction[1] = etp[1] / magnitude;
// direction[2] = etp[2] / magnitude;
direction = Vector3();
direction.x = etp[0] / magnitude;
direction.y = etp[1] / magnitude;
direction.z = etp[2] / magnitude;
}
*ray = Ray(origin, direction);
}
free(etp);
}
}
| d2fc794b7701d73c1c202d6309c2c744325294dd.cu | #include <math.h>
#include "Vector3.cuh"
#include "Ray.cuh"
//TODO: Cleanup to use Vector3 objects for (u,v,w) cam direction,
__global__ void generateRays(float * u, float * v, float * w, float * eyePoint, float * cameraDirection, float viewPlaneDistance, float horizontalRaySpacing, float verticalRaySpacing, mint hres, mint vres, mint pixelCount, mint orthographic, mint sampleCountRoot, float * randomValues, mint randomCount, Ray * rays) {
int pixelIndex = threadIdx.x + blockIdx.x*blockDim.x;
if (pixelIndex < pixelCount) {
float * etp = (float*)malloc(3 * sizeof(float)); // Vector from the Eye To Pixel of this ray
// Get the screen space coordinates (col,row)
int col = pixelIndex % hres;
        int row = pixelIndex / hres; // Integer division; rows are hres pixels wide, matching the col computation above
int sampleCount = sampleCountRoot * sampleCountRoot;
for (int sampleIndex = 0; sampleIndex < sampleCount; sampleIndex++) {
// Get the view space coordinates for the pixel (xv, yv, zv)
float xv = horizontalRaySpacing * (col - hres/2.0);
float yv = verticalRaySpacing * (row - vres/2.0);
float zv = -viewPlaneDistance; // Distance from the eye point to the view plane
// Offset the view space coordinates by random amount in the section specified by the current sample
int subCol = sampleIndex % sampleCountRoot;
int subRow = sampleIndex / sampleCountRoot; // Integer Division
float hSubSpacing = horizontalRaySpacing / sampleCountRoot;
float vSubSpacing = verticalRaySpacing / sampleCountRoot;
// float subSpacing = raySpacing / sampleCountRoot;
// TODO: Have the initial xv and yv not have the +0.5 to put in center. X DONE, keeping this in as reminder
xv += subCol * hSubSpacing;
yv += subRow * vSubSpacing;
// randX and randY are pre-computed random values between 0-1.
float randX = *(randomValues + ((pixelIndex + sampleIndex) % randomCount));
float randY = *(randomValues + ((pixelIndex + sampleIndex+13) % randomCount));
xv += hSubSpacing * randX;
yv += vSubSpacing * randY;
// Get the vectors from the eye to the pixel in world space
// using the orthonormal basis (u,v,w) to convert the view space coordinates.
// Vector3 eyeToPixel = Vector3( xv*u[0] + yv*v[0] + zv*w[0],
// xv*u[1] + yv*v[1] + zv*w[1],
// xv*u[2] + yv*v[2] + zv*w[2]
// );
etp[0] = xv*u[0] + yv*v[0] + zv*w[0];
etp[1] = xv*u[1] + yv*v[1] + zv*w[1];
etp[2] = xv*u[2] + yv*v[2] + zv*w[2];
// float * origin = rayOrigins + pixelIndex*3*sampleCount + sampleIndex*3;
// float * direction = rayDirections + pixelIndex*3*sampleCount + sampleIndex*3;
Ray * ray = rays + pixelIndex*sampleCount + sampleIndex;
Vector3 origin;
Vector3 direction;
if (orthographic) {
// Origins should be each individual pixel's position
// Directions should all be the camera direction
// Pixel position = eye point + eye-to-pixel vector
// origin[0] = eyePoint[0] + etp[0];
// origin[1] = eyePoint[1] + etp[1];
// origin[2] = eyePoint[2] + etp[2];
origin = Vector3();
origin.x = eyePoint[0] + etp[0];
origin.y = eyePoint[1] + etp[1];
origin.z = eyePoint[2] + etp[2];
// Direction = Camera Direciton
// direction[0] = cameraDirection[0];
// direction[1] = cameraDirection[1];
// direction[2] = cameraDirection[2];
direction = Vector3();
direction.x = cameraDirection[0];
direction.y = cameraDirection[1];
direction.z = cameraDirection[2];
} else {
// Origins are all the same (eye point)
// Directions are from the eye point to the pixel (normalized)
// Origin = Eye Point
// origin[0] = eyePoint[0];
// origin[1] = eyePoint[1];
// origin[2] = eyePoint[2];
origin = Vector3();
origin.x = eyePoint[0];
origin.y = eyePoint[1];
origin.z = eyePoint[2];
// Direction = Normalized Eye-To-Pixel Vector
float magnitude = sqrtf(etp[0]*etp[0] + etp[1]*etp[1] + etp[2]*etp[2]);
// direction[0] = etp[0] / magnitude;
// direction[1] = etp[1] / magnitude;
// direction[2] = etp[2] / magnitude;
direction = Vector3();
direction.x = etp[0] / magnitude;
direction.y = etp[1] / magnitude;
direction.z = etp[2] / magnitude;
}
*ray = Ray(origin, direction);
}
free(etp);
}
}
|
49d8f0ed6913add45a834d00e8ea3cb84ed20819.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// System includes
#include <iostream>
#include <assert.h>
#include <math.h>
#include "operators.hpp"
#include "operators.cuh"
#include <time.h>
#define NUM_SLAVES 5
#define CARD_SUPPORT_SET 20
#define NUM_FEATURES 11
#define NUM_SAMPLES 20000
// to compute local summary (running on GPU)
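// NOTE: this kernel launches child kernels (cov, inv) and creates a hipBLAS handle from device
// code, so it assumes a toolchain/build with dynamic parallelism and a device-callable BLAS API.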
__global__ void slave_local(int N, float *S, float *D, float *yD, float *local_M, float *local_C){
float *SD, *DD, *DS, *SS;
float *inv_DD_S = new float[N*N];
float **a = new float*[4];
float **b = new float*[4];
float **out = new float*[4];
// set A and B samples for parallel execution
a[0] = S; b[0] = D;
a[1] = D; b[1] = D;
a[2] = D; b[2] = S;
a[3] = S; b[3] = S;
// execute 4 covariance functions in parallel using 4 blocks with N threads
hipLaunchKernelGGL(( cov), dim3(4),dim3(N), 0, 0, a, b, N, out);
// get outputs of covariance functions
SD = out[0];
DD = out[1];
DS = out[2];
SS = out[3];
// calculate local summary (using CuBLAS)
hipblasHandle_t handle;
hipblasStatus_t stat = hipblasCreate(&handle);
float alpha = 1.0f;
float beta = 0.0f;
int size = N*N;
float *inv_SS = new float[N*N];
float *DD_S = new float[N*N];
hipLaunchKernelGGL(( inv), dim3(1),dim3(1), 0, 0, SS, N, inv_SS);
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N,N,N, &alpha, DS, N, inv_SS, N, &beta, DD_S, N);
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N,N,N, &alpha, DD_S, N, SD, N, &beta, inv_DD_S, N);
alpha = -1.0;
hipblasSaxpy(handle, size, &alpha, inv_DD_S, 1, DD, 1);
hipLaunchKernelGGL(( inv), dim3(1),dim3(1), 0, 0, DD, N, inv_DD_S);
alpha = 1.0;
// compute local mean (using CuBLAS)
float *local_M_temp = new float[N*N];
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N,N,N, &alpha, SD, N, inv_DD_S, N, &beta, local_M_temp, N);
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N,1,N, &alpha, local_M_temp, N, yD, N, &beta, local_M, N);
// compute local covariance (using CuBLAS)
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N,N,N, &alpha, local_M_temp, N, DS, N, &beta, local_C, N);
// free memory in device
free(a); free(b); free(out);
free(inv_SS);
free(DD_S);
free(local_M_temp);
}
// to calculate for global summary (running on GPU)
__global__ void slave_global(int N, float *S, float *U, float *global_C, float *global_M, float *pred_mean) {
float **a = new float*[1];
float **b = new float*[1];
float **out = new float*[1];
// computation for UU, SU, UD, DU is skipped since we do not need them for prediction mean
a[0] = U; b[0] = S;
// execute 1 covariance function with N parallel threads
hipLaunchKernelGGL(( cov), dim3(1),dim3(N), 0, 0, a, b, N, out);
// get the output
float *US = out[0];
// calculate for prediction mean (using CuBLAS)
hipblasHandle_t handle;
hipblasStatus_t stat = hipblasCreate(&handle);
float alpha = 1.0;
float beta = 0.0f;
float *inv_global_C = new float[N*N];
hipLaunchKernelGGL(( inv), dim3(1),dim3(1), 0, 0, global_C, N, inv_global_C);
// predictions stored in pred_mean
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N,N,N, &alpha, US, N, inv_global_C, N, &beta, pred_mean, N);
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, N,N,N, &alpha, pred_mean, N, global_M, N, &beta, pred_mean, N);
}
// master runs on CPU
void master(mat S, float* pred, int* partition, mat train_data, mat train_target, mat test_data, mat test_target, int interval) {
int slaveCount;
float *S_set = matToArray(S);
    float *global_M = new float[interval]();        // value-initialize to zero: accumulated with += below
    float *global_C = new float[interval*interval]();
float **train_data_arr = new float*[NUM_SLAVES];
float **train_target_arr = new float*[NUM_SLAVES];
float **test_data_arr = new float*[NUM_SLAVES];
float **local_M_arr = new float*[NUM_SLAVES];
float **local_C_arr = new float*[NUM_SLAVES];
for(int i=0;i<NUM_SLAVES;i++){
local_M_arr[i] = new float[interval];
local_C_arr[i] = new float[interval*interval];
}
hipStream_t streams[NUM_SLAVES];
int s = sizeof(float);
// device copies
float *d_support, *d_train_data, *d_train_target, *d_test_data, *local_M, *local_C;
// allocate space for device copies
hipMalloc((void **)&d_support, CARD_SUPPORT_SET*NUM_FEATURES*s);
hipMalloc((void **)&d_train_data, NUM_SLAVES*interval*NUM_FEATURES*s);
hipMalloc((void **)&d_train_target, NUM_SLAVES*interval*1*s);
hipMalloc((void **)&d_test_data, NUM_SLAVES*interval*NUM_FEATURES*s);
hipMalloc((void **)&local_M, NUM_SLAVES*interval*1*s);
hipMalloc((void **)&local_C, NUM_SLAVES*interval*interval*s);
// copy common support set to device memory first
hipMemcpy(d_support, S_set, CARD_SUPPORT_SET*NUM_FEATURES*s, hipMemcpyHostToDevice);
for (slaveCount=0; slaveCount < NUM_SLAVES; slaveCount++){
// split data for each slave
train_data_arr[slaveCount] = matToArray(train_data.rows(slaveCount*interval, (slaveCount+1)*interval-1));
train_target_arr[slaveCount] = matToArray(train_target.rows(slaveCount*interval, (slaveCount+1)*interval-1));
test_data_arr[slaveCount] = matToArray(test_data.rows(slaveCount*interval, (slaveCount+1)*interval-1));
// copy the data for train, target and test into device memory
hipMemcpy(&d_train_data[slaveCount*(interval*NUM_FEATURES)], train_data_arr[slaveCount], interval*NUM_FEATURES*s, hipMemcpyHostToDevice);
hipMemcpy(&d_train_target[slaveCount*(interval*1)], train_target_arr[slaveCount], interval*1*s, hipMemcpyHostToDevice);
hipMemcpy(&d_test_data[slaveCount*(interval*NUM_FEATURES)], test_data_arr[slaveCount], interval*NUM_FEATURES*s, hipMemcpyHostToDevice);
}
// start NUM_SLAVES workers to calculate for local summary
for (slaveCount = 0; slaveCount < NUM_SLAVES; slaveCount++) {
// create new stream for parallel grid execution
hipStreamCreate(&streams[slaveCount]);
// launch one worker(slave) kernel per stream
hipLaunchKernelGGL(( slave_local), dim3(1), dim3(1), 0, streams[slaveCount], partition[slaveCount], d_support,
&d_train_data[slaveCount*(interval*NUM_FEATURES)],
&d_train_target[slaveCount*(interval*1)],
&local_M[slaveCount*(interval*1)],
&local_C[slaveCount*(interval*interval)]);
}
    // synchronize all streams
for(int i=0; i<NUM_SLAVES; i++){
hipStreamSynchronize(streams[i]);
}
// Copy result back to host
for (int slaveCount=0; slaveCount<NUM_SLAVES; slaveCount++){
hipMemcpy(local_M_arr[slaveCount], &local_M[slaveCount*(interval*1)], interval*1*s, hipMemcpyDeviceToHost);
hipMemcpy(local_C_arr[slaveCount], &local_C[slaveCount*(interval*interval)], interval*interval*s, hipMemcpyDeviceToHost);
}
// free device memory
hipFree(d_train_data);
hipFree(d_train_target);
// sum up local summary to get global summary
for (slaveCount = 0; slaveCount < NUM_SLAVES; slaveCount++) {
for(int i=0; i<interval; i++){
global_M[i] += local_M_arr[slaveCount][i];
for(int j=0; j<interval; j++){
global_C[i*interval+j] += local_C_arr[slaveCount][i*interval+j];
}
}
}
// initialize variables for global summary to be copied to device
float *d_global_M, *d_global_C;
float *d_pred_M;
interval = (NUM_SAMPLES-NUM_SAMPLES/2)/NUM_SLAVES;
// allocate space for global summaries on device
hipMalloc((void **)&d_global_M, interval*NUM_FEATURES*s);
hipMalloc((void **)&d_global_C, interval*interval*s);
hipMalloc((void **)&d_pred_M, NUM_SAMPLES/3*s);
// copy global summaries to device
hipMemcpy(d_global_M, global_M, interval*NUM_FEATURES*s, hipMemcpyHostToDevice);
hipMemcpy(d_global_C, global_C, interval*interval*s, hipMemcpyHostToDevice);
// calculate for final prediction
for (slaveCount = 0; slaveCount < NUM_SLAVES; slaveCount++) {
// launch one worker(slave) kernel per stream (reuse stream from previous)
hipLaunchKernelGGL(( slave_global), dim3(1), dim3(1), 0, streams[slaveCount], interval, d_support,
&d_test_data[slaveCount*interval*NUM_FEATURES],
d_global_M, d_global_C, &d_pred_M[slaveCount*(interval)]);
}
    // synchronize all streams
for(int i=0; i<NUM_SLAVES; i++){
hipStreamSynchronize(streams[i]);
}
    // synchronize all device functions
hipDeviceSynchronize();
    // Copy prediction result back to host
for (int slaveCount=0; slaveCount<NUM_SLAVES; slaveCount++){
        hipMemcpy(&pred[slaveCount*interval], &d_pred_M[slaveCount*(interval)], interval*sizeof(float), hipMemcpyDeviceToHost);  // each slave fills its own block of pred
}
// Cleanup
hipFree(d_support);
hipFree(d_test_data);
hipFree(local_C);
hipFree(d_global_M);
hipFree(d_global_C);
hipFree(d_pred_M);
// results are in pred (float* pred)
}
// main runs on CPU
int main(void){
clock_t start = clock();
// load data from csv file
std::string path = "../hdb.csv";
mat data = parseCsvFile(path, NUM_SAMPLES);
// normalise the dataset
int rows = data.n_rows;
int columns = data.n_cols;
mat Max = max(data, 0);
mat Min = min(data, 0);
for(int i=0;i<rows;i++){
// ignore the last target column
for(int j=1;j<columns; j++){
data(i,j) = (data(i,j)-Min(0, j))/Max(0, j);
}
}
// split data into training and testing samples
int all_samples = data.n_rows;
mat train_data = data.rows(0, all_samples/2-1).cols(1, 11);
mat train_target = data.rows(0, all_samples/2-1).col(0);
mat test_data = data.rows(all_samples/2, all_samples-1).cols(1, 11);
mat test_target = data.rows(all_samples/2, all_samples-1).col(0);
float *pred = new float[all_samples-all_samples/2];
// get the support data set and partitions of training data set
mat support;
int partitions[NUM_SLAVES+1];
int intervals = all_samples/(2*NUM_SLAVES);
for(int i=0;i<NUM_SLAVES;i++){
partitions[i+1] = all_samples/(2*NUM_SLAVES);
int idx = i*intervals;
for(int j=0;j<CARD_SUPPORT_SET/NUM_SLAVES;j++){
support.insert_rows(0, train_data.row(idx+j));
}
}
// call master function (execute on CPU) to start slaves (working on GPU)
master(support, pred, partitions, train_data, train_target, test_data, test_target, intervals);
clock_t end = clock();
double time_spent= (double)(end-start) / CLOCKS_PER_SEC;
printf("Total time for %d slaves to execute %d samples: %f\n", NUM_SLAVES, NUM_SAMPLES, time_spent);
// for printing out predictions in pred
/*
for(int i = 0; i < (all_samples-all_samples/2); i++){
cout << pred[i] << "(" << test_target(i, 0) << ")" << "\t";
if(i%10==0 && i!=0){
cout<<endl;
}
}
*/
return(0);
}
| 49d8f0ed6913add45a834d00e8ea3cb84ed20819.cu | // System includes
#include <iostream>
#include <assert.h>
#include <math.h>
#include "operators.hpp"
#include "operators.cuh"
#include <time.h>
#define NUM_SLAVES 5
#define CARD_SUPPORT_SET 20
#define NUM_FEATURES 11
#define NUM_SAMPLES 20000
// to compute local summary (running on GPU)
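// NOTE: this kernel launches child kernels (cov, inv) and creates a cuBLAS handle from device
// code, so it assumes a toolchain/build with dynamic parallelism and a device-callable BLAS API.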
__global__ void slave_local(int N, float *S, float *D, float *yD, float *local_M, float *local_C){
float *SD, *DD, *DS, *SS;
float *inv_DD_S = new float[N*N];
float **a = new float*[4];
float **b = new float*[4];
float **out = new float*[4];
// set A and B samples for parallel execution
a[0] = S; b[0] = D;
a[1] = D; b[1] = D;
a[2] = D; b[2] = S;
a[3] = S; b[3] = S;
// execute 4 covariance functions in parallel using 4 blocks with N threads
cov<<<4,N>>>(a, b, N, out);
// get outputs of covariance functions
SD = out[0];
DD = out[1];
DS = out[2];
SS = out[3];
// calculate local summary (using CuBLAS)
cublasHandle_t handle;
cublasStatus_t stat = cublasCreate(&handle);
float alpha = 1.0f;
float beta = 0.0f;
int size = N*N;
float *inv_SS = new float[N*N];
float *DD_S = new float[N*N];
inv<<<1,1>>>(SS, N, inv_SS);
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N,N,N, &alpha, DS, N, inv_SS, N, &beta, DD_S, N);
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N,N,N, &alpha, DD_S, N, SD, N, &beta, inv_DD_S, N);
alpha = -1.0;
cublasSaxpy(handle, size, &alpha, inv_DD_S, 1, DD, 1);
inv<<<1,1>>>(DD, N, inv_DD_S);
alpha = 1.0;
// compute local mean (using CuBLAS)
float *local_M_temp = new float[N*N];
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N,N,N, &alpha, SD, N, inv_DD_S, N, &beta, local_M_temp, N);
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N,1,N, &alpha, local_M_temp, N, yD, N, &beta, local_M, N);
// compute local covariance (using CuBLAS)
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N,N,N, &alpha, local_M_temp, N, DS, N, &beta, local_C, N);
// free memory in device
free(a); free(b); free(out);
free(inv_SS);
free(DD_S);
free(local_M_temp);
}
// to calculate for global summary (running on GPU)
__global__ void slave_global(int N, float *S, float *U, float *global_C, float *global_M, float *pred_mean) {
float **a = new float*[1];
float **b = new float*[1];
float **out = new float*[1];
// computation for UU, SU, UD, DU is skipped since we do not need them for prediction mean
a[0] = U; b[0] = S;
// execute 1 covariance function with N parallel threads
cov<<<1,N>>>(a, b, N, out);
// get the output
float *US = out[0];
// calculate for prediction mean (using CuBLAS)
cublasHandle_t handle;
cublasStatus_t stat = cublasCreate(&handle);
float alpha = 1.0;
float beta = 0.0f;
float *inv_global_C = new float[N*N];
inv<<<1,1>>>(global_C, N, inv_global_C);
// predictions stored in pred_mean
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N,N,N, &alpha, US, N, inv_global_C, N, &beta, pred_mean, N);
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, N,N,N, &alpha, pred_mean, N, global_M, N, &beta, pred_mean, N);
}
// master runs on CPU
void master(mat S, float* pred, int* partition, mat train_data, mat train_target, mat test_data, mat test_target, int interval) {
int slaveCount;
float *S_set = matToArray(S);
    float *global_M = new float[interval]();        // value-initialize to zero: accumulated with += below
    float *global_C = new float[interval*interval]();
float **train_data_arr = new float*[NUM_SLAVES];
float **train_target_arr = new float*[NUM_SLAVES];
float **test_data_arr = new float*[NUM_SLAVES];
float **local_M_arr = new float*[NUM_SLAVES];
float **local_C_arr = new float*[NUM_SLAVES];
for(int i=0;i<NUM_SLAVES;i++){
local_M_arr[i] = new float[interval];
local_C_arr[i] = new float[interval*interval];
}
cudaStream_t streams[NUM_SLAVES];
int s = sizeof(float);
// device copies
float *d_support, *d_train_data, *d_train_target, *d_test_data, *local_M, *local_C;
// allocate space for device copies
cudaMalloc((void **)&d_support, CARD_SUPPORT_SET*NUM_FEATURES*s);
cudaMalloc((void **)&d_train_data, NUM_SLAVES*interval*NUM_FEATURES*s);
cudaMalloc((void **)&d_train_target, NUM_SLAVES*interval*1*s);
cudaMalloc((void **)&d_test_data, NUM_SLAVES*interval*NUM_FEATURES*s);
cudaMalloc((void **)&local_M, NUM_SLAVES*interval*1*s);
cudaMalloc((void **)&local_C, NUM_SLAVES*interval*interval*s);
// copy common support set to device memory first
cudaMemcpy(d_support, S_set, CARD_SUPPORT_SET*NUM_FEATURES*s, cudaMemcpyHostToDevice);
for (slaveCount=0; slaveCount < NUM_SLAVES; slaveCount++){
// split data for each slave
train_data_arr[slaveCount] = matToArray(train_data.rows(slaveCount*interval, (slaveCount+1)*interval-1));
train_target_arr[slaveCount] = matToArray(train_target.rows(slaveCount*interval, (slaveCount+1)*interval-1));
test_data_arr[slaveCount] = matToArray(test_data.rows(slaveCount*interval, (slaveCount+1)*interval-1));
// copy the data for train, target and test into device memory
cudaMemcpy(&d_train_data[slaveCount*(interval*NUM_FEATURES)], train_data_arr[slaveCount], interval*NUM_FEATURES*s, cudaMemcpyHostToDevice);
cudaMemcpy(&d_train_target[slaveCount*(interval*1)], train_target_arr[slaveCount], interval*1*s, cudaMemcpyHostToDevice);
cudaMemcpy(&d_test_data[slaveCount*(interval*NUM_FEATURES)], test_data_arr[slaveCount], interval*NUM_FEATURES*s, cudaMemcpyHostToDevice);
}
// start NUM_SLAVES workers to calculate for local summary
for (slaveCount = 0; slaveCount < NUM_SLAVES; slaveCount++) {
// create new stream for parallel grid execution
cudaStreamCreate(&streams[slaveCount]);
// launch one worker(slave) kernel per stream
slave_local<<<1, 1, 0, streams[slaveCount]>>>(partition[slaveCount], d_support,
&d_train_data[slaveCount*(interval*NUM_FEATURES)],
&d_train_target[slaveCount*(interval*1)],
&local_M[slaveCount*(interval*1)],
&local_C[slaveCount*(interval*interval)]);
}
    // synchronize all streams
for(int i=0; i<NUM_SLAVES; i++){
cudaStreamSynchronize(streams[i]);
}
// Copy result back to host
for (int slaveCount=0; slaveCount<NUM_SLAVES; slaveCount++){
cudaMemcpy(local_M_arr[slaveCount], &local_M[slaveCount*(interval*1)], interval*1*s, cudaMemcpyDeviceToHost);
cudaMemcpy(local_C_arr[slaveCount], &local_C[slaveCount*(interval*interval)], interval*interval*s, cudaMemcpyDeviceToHost);
}
// free device memory
cudaFree(d_train_data);
cudaFree(d_train_target);
// sum up local summary to get global summary
for (slaveCount = 0; slaveCount < NUM_SLAVES; slaveCount++) {
for(int i=0; i<interval; i++){
global_M[i] += local_M_arr[slaveCount][i];
for(int j=0; j<interval; j++){
global_C[i*interval+j] += local_C_arr[slaveCount][i*interval+j];
}
}
}
// initialize variables for global summary to be copied to device
float *d_global_M, *d_global_C;
float *d_pred_M;
interval = (NUM_SAMPLES-NUM_SAMPLES/2)/NUM_SLAVES;
// allocate space for global summaries on device
cudaMalloc((void **)&d_global_M, interval*NUM_FEATURES*s);
cudaMalloc((void **)&d_global_C, interval*interval*s);
cudaMalloc((void **)&d_pred_M, NUM_SAMPLES/3*s);
// copy global summaries to device
cudaMemcpy(d_global_M, global_M, interval*NUM_FEATURES*s, cudaMemcpyHostToDevice);
cudaMemcpy(d_global_C, global_C, interval*interval*s, cudaMemcpyHostToDevice);
// calculate for final prediction
for (slaveCount = 0; slaveCount < NUM_SLAVES; slaveCount++) {
// launch one worker(slave) kernel per stream (reuse stream from previous)
slave_global<<<1, 1, 0, streams[slaveCount]>>>(interval, d_support,
&d_test_data[slaveCount*interval*NUM_FEATURES],
d_global_M, d_global_C, &d_pred_M[slaveCount*(interval)]);
}
    // synchronize all streams
for(int i=0; i<NUM_SLAVES; i++){
cudaStreamSynchronize(streams[i]);
}
    // synchronize all device functions
cudaDeviceSynchronize();
    // Copy prediction result back to host
for (int slaveCount=0; slaveCount<NUM_SLAVES; slaveCount++){
        cudaMemcpy(&pred[slaveCount*interval], &d_pred_M[slaveCount*(interval)], interval*sizeof(float), cudaMemcpyDeviceToHost);  // each slave fills its own block of pred
}
// Cleanup
cudaFree(d_support);
cudaFree(d_test_data);
cudaFree(local_C);
cudaFree(d_global_M);
cudaFree(d_global_C);
cudaFree(d_pred_M);
// results are in pred (float* pred)
}
// main runs on CPU
int main(void){
clock_t start = clock();
// load data from csv file
std::string path = "../hdb.csv";
mat data = parseCsvFile(path, NUM_SAMPLES);
// normalise the dataset
int rows = data.n_rows;
int columns = data.n_cols;
mat Max = max(data, 0);
mat Min = min(data, 0);
for(int i=0;i<rows;i++){
// ignore the last target column
for(int j=1;j<columns; j++){
data(i,j) = (data(i,j)-Min(0, j))/Max(0, j);
}
}
// split data into training and testing samples
int all_samples = data.n_rows;
mat train_data = data.rows(0, all_samples/2-1).cols(1, 11);
mat train_target = data.rows(0, all_samples/2-1).col(0);
mat test_data = data.rows(all_samples/2, all_samples-1).cols(1, 11);
mat test_target = data.rows(all_samples/2, all_samples-1).col(0);
float *pred = new float[all_samples-all_samples/2];
// get the support data set and partitions of training data set
mat support;
int partitions[NUM_SLAVES+1];
int intervals = all_samples/(2*NUM_SLAVES);
for(int i=0;i<NUM_SLAVES;i++){
partitions[i+1] = all_samples/(2*NUM_SLAVES);
int idx = i*intervals;
for(int j=0;j<CARD_SUPPORT_SET/NUM_SLAVES;j++){
support.insert_rows(0, train_data.row(idx+j));
}
}
// call master function (execute on CPU) to start slaves (working on GPU)
master(support, pred, partitions, train_data, train_target, test_data, test_target, intervals);
clock_t end = clock();
double time_spent= (double)(end-start) / CLOCKS_PER_SEC;
printf("Total time for %d slaves to execute %d samples: %f\n", NUM_SLAVES, NUM_SAMPLES, time_spent);
// for printing out predictions in pred
/*
for(int i = 0; i < (all_samples-all_samples/2); i++){
cout << pred[i] << "(" << test_target(i, 0) << ")" << "\t";
if(i%10==0 && i!=0){
cout<<endl;
}
}
*/
return(0);
}
|
ce21a9fcad0c1cb883b17f904101bb2d332ecab4.hip | // !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
hipStream_t stream);
#pragma GCC diagnostic pop
#endif
| ce21a9fcad0c1cb883b17f904101bb2d332ecab4.cu | #if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"
using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCHW;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 64, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 64, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationRelu<
float, 1, int32_t, float, float>;
using Convolution = cutlass::convolution::device::Convolution<
int8_t, LayoutSrc, int8_t, LayoutFilter, float,
LayoutDst, float, LayoutDst, int32_t,
cutlass::convolution::ConvType::kConvolution, cutlass::arch::OpClassSimt, cutlass::arch::Sm61,
ThreadBlockShape, WarpShape, InstructionShape, EpilogueOp,
cutlass::convolution::threadblock::ConvolutionNCxHWxThreadblockSwizzle<
cutlass::convolution::ConvType::kConvolution>,
2, 4, 16, false,
cutlass::arch::OpMultiplyAdd>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
const typename Convolution::ElementSrc* d_src,
const typename Convolution::ElementFilter* d_filter,
const typename Convolution::ElementBias* d_bias,
const typename Convolution::ElementDst* d_z,
typename Convolution::ElementDst* d_dst,
int* workspace,
typename Convolution::ConvolutionParameter const& conv_param,
typename Convolution::EpilogueOutputOp::Params const& epilogue,
cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
|
7af126b06d495e10a0d01135bf17d832b16960d4.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* this program is a simple test of the atomicAdd function for serial-dependent
* addition of results
*
*/
#include <iostream>
#define TOTAL_SIZE 100000
#define nTPB 256
#define NUM_ATOMS 20
#define NUM_THREADS 12
#define NUM_BLOCKS 10
#define LENGTH_LOOKUP 240
#define cudaCheckErrors(msg) \
do { \
hipError_t __err = hipGetLastError(); \
if (__err != hipSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, hipGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
__global__ void kernelCode(float *result) {
int index = threadIdx.x+blockIdx.x*blockDim.x;
if (index < TOTAL_SIZE)
{
atomicAdd(result, 1.0f);
}
}
__global__ void atomicAddTestKernel( float *inputAtoms ) {
float num2Add = (float) threadIdx.x+blockIdx.x*blockDim.x;
for ( int i = 0; i < NUM_ATOMS; i++ )
atomicAdd( &inputAtoms[i], num2Add );
}
__global__ void atomicAddLookupTableKernel ( int *lookup ) {
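    // atomicAdd returns the counter's previous value, so each thread reserves a unique slot;
    // the order of slots depends on the order in which the atomic operations complete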
int index = atomicAdd( &lookup[0], 1 );
// printf("%d %d\n", index, threadIdx.x);
lookup[index+1] = threadIdx.x+1;
}
int main(){
//--------------------------------------------------------------------------
// stock code (one number)
// allocate variable on the GPU
float h_result, *d_result;
hipMalloc((void **)&d_result, sizeof(float));
cudaCheckErrors("cuda malloc fail");
// copy local variable to GPU
h_result = 0.0f;
hipMemcpy(d_result, &h_result, sizeof(float), hipMemcpyHostToDevice);
cudaCheckErrors("hipMemcpy 1 fail");
// run atomicAdd kernel
hipLaunchKernelGGL(( kernelCode), dim3((TOTAL_SIZE+nTPB-1)/nTPB), dim3(nTPB), 0, 0, d_result);
hipDeviceSynchronize();
cudaCheckErrors("kernel fail");
// copy back result to local memory
hipMemcpy(&h_result, d_result, sizeof(float), hipMemcpyDeviceToHost);
cudaCheckErrors("hipMemcpy 2 fail");
std::cout<< "result = " << h_result << std::endl;
//--------------------------------------------------------------------------
// practice code (array)
// allocate variable on the GPU and CPU
int i;
float *inputAtoms, *d_inputAtoms;
size_t sizeArray = sizeof(float) * NUM_ATOMS;
inputAtoms = (float*) malloc ( sizeArray );
hipMalloc( &d_inputAtoms, sizeArray );
cudaCheckErrors("cuda malloc fail");
// copy local variable to GPU
for ( i = 0; i < NUM_ATOMS; ++i ) {
inputAtoms[i] = 1.0f;
printf("%d ", (int)inputAtoms[i]);
} printf("\n");
hipMemcpy( d_inputAtoms, inputAtoms, sizeArray, hipMemcpyHostToDevice );
cudaCheckErrors("hipMemcpy 1 fail");
// run atomicAdd kernel
hipLaunchKernelGGL(( atomicAddTestKernel), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, d_inputAtoms);
hipDeviceSynchronize();
cudaCheckErrors("kernel fail");
// copy back result to local memory
hipMemcpy( inputAtoms, d_inputAtoms, sizeArray, hipMemcpyDeviceToHost );
cudaCheckErrors("hipMemcpy 2 fail");
// report results and close
for ( i = 0; i < NUM_ATOMS; ++i )
printf("%d ", (int)inputAtoms[i]);
int tmp = (NUM_BLOCKS)*(NUM_THREADS);
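  // every thread adds its global index (0..tmp-1) to each element, which starts at 1,
  // so the expected value is 1 + tmp*(tmp-1)/2, which equals the expression below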
printf("\nshould be %d\n",
1 - tmp + tmp * (tmp + 1) / 2);
free( inputAtoms );
hipFree( d_inputAtoms );
//--------------------------------------------------------------------------
// practice lookup table prototype
// allocate variable on the GPU and CPU
int *lookup, *d_lookup;
sizeArray = sizeof(float) * LENGTH_LOOKUP;
lookup = (int*) malloc ( sizeArray );
hipMalloc( &d_lookup, sizeArray );
cudaCheckErrors("cuda malloc fail");
// copy local variable to GPU
for ( i = 0; i < LENGTH_LOOKUP; ++i ) {
lookup[i] = 0;
    printf("%d ", lookup[i]);   // print the initial table values (inputAtoms was already freed above)
} printf("\n");
hipMemcpy( d_lookup, lookup, sizeArray, hipMemcpyHostToDevice );
cudaCheckErrors("hipMemcpy 1 fail");
// run atomicAdd lookup table test kernel
hipLaunchKernelGGL(( atomicAddLookupTableKernel), dim3(1), dim3(LENGTH_LOOKUP-1), 0, 0, d_lookup);
hipDeviceSynchronize();
cudaCheckErrors("kernel fail");
// copy back result to local memory
hipMemcpy( lookup, d_lookup, sizeArray, hipMemcpyDeviceToHost );
cudaCheckErrors("hipMemcpy 2 fail");
// report results and close
for ( i = 0; i < LENGTH_LOOKUP; ++i )
printf("%d ", lookup[i]);
printf("\n");
free( lookup );
hipFree( d_lookup );
return 0;
}
| 7af126b06d495e10a0d01135bf17d832b16960d4.cu | /*
* this program is a simple test of the atomicAdd function for serial-dependent
* addition of results
*
*/
#include <iostream>
#define TOTAL_SIZE 100000
#define nTPB 256
#define NUM_ATOMS 20
#define NUM_THREADS 12
#define NUM_BLOCKS 10
#define LENGTH_LOOKUP 240
#define cudaCheckErrors(msg) \
do { \
cudaError_t __err = cudaGetLastError(); \
if (__err != cudaSuccess) { \
fprintf(stderr, "Fatal error: %s (%s at %s:%d)\n", \
msg, cudaGetErrorString(__err), \
__FILE__, __LINE__); \
fprintf(stderr, "*** FAILED - ABORTING\n"); \
exit(1); \
} \
} while (0)
__global__ void kernelCode(float *result) {
int index = threadIdx.x+blockIdx.x*blockDim.x;
if (index < TOTAL_SIZE)
{
atomicAdd(result, 1.0f);
}
}
__global__ void atomicAddTestKernel( float *inputAtoms ) {
float num2Add = (float) threadIdx.x+blockIdx.x*blockDim.x;
for ( int i = 0; i < NUM_ATOMS; i++ )
atomicAdd( &inputAtoms[i], num2Add );
}
__global__ void atomicAddLookupTableKernel ( int *lookup ) {
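    // atomicAdd returns the counter's previous value, so each thread reserves a unique slot;
    // the order of slots depends on the order in which the atomic operations complete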
int index = atomicAdd( &lookup[0], 1 );
// printf("%d %d\n", index, threadIdx.x);
lookup[index+1] = threadIdx.x+1;
}
int main(){
//--------------------------------------------------------------------------
// stock code (one number)
// allocate variable on the GPU
float h_result, *d_result;
cudaMalloc((void **)&d_result, sizeof(float));
cudaCheckErrors("cuda malloc fail");
// copy local variable to GPU
h_result = 0.0f;
cudaMemcpy(d_result, &h_result, sizeof(float), cudaMemcpyHostToDevice);
cudaCheckErrors("cudaMemcpy 1 fail");
// run atomicAdd kernel
kernelCode<<<(TOTAL_SIZE+nTPB-1)/nTPB, nTPB>>>(d_result);
cudaDeviceSynchronize();
cudaCheckErrors("kernel fail");
// copy back result to local memory
cudaMemcpy(&h_result, d_result, sizeof(float), cudaMemcpyDeviceToHost);
cudaCheckErrors("cudaMemcpy 2 fail");
std::cout<< "result = " << h_result << std::endl;
//--------------------------------------------------------------------------
// practice code (array)
// allocate variable on the GPU and CPU
int i;
float *inputAtoms, *d_inputAtoms;
size_t sizeArray = sizeof(float) * NUM_ATOMS;
inputAtoms = (float*) malloc ( sizeArray );
cudaMalloc( &d_inputAtoms, sizeArray );
cudaCheckErrors("cuda malloc fail");
// copy local variable to GPU
for ( i = 0; i < NUM_ATOMS; ++i ) {
inputAtoms[i] = 1.0f;
printf("%d ", (int)inputAtoms[i]);
} printf("\n");
cudaMemcpy( d_inputAtoms, inputAtoms, sizeArray, cudaMemcpyHostToDevice );
cudaCheckErrors("cudaMemcpy 1 fail");
// run atomicAdd kernel
atomicAddTestKernel<<<NUM_BLOCKS, NUM_THREADS>>>(d_inputAtoms);
cudaDeviceSynchronize();
cudaCheckErrors("kernel fail");
// copy back result to local memory
cudaMemcpy( inputAtoms, d_inputAtoms, sizeArray, cudaMemcpyDeviceToHost );
cudaCheckErrors("cudaMemcpy 2 fail");
// report results and close
for ( i = 0; i < NUM_ATOMS; ++i )
printf("%d ", (int)inputAtoms[i]);
int tmp = (NUM_BLOCKS)*(NUM_THREADS);
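  // every thread adds its global index (0..tmp-1) to each element, which starts at 1,
  // so the expected value is 1 + tmp*(tmp-1)/2, which equals the expression below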
printf("\nshould be %d\n",
1 - tmp + tmp * (tmp + 1) / 2);
free( inputAtoms );
cudaFree( d_inputAtoms );
//--------------------------------------------------------------------------
// practice lookup table prototype
// allocate variable on the GPU and CPU
int *lookup, *d_lookup;
sizeArray = sizeof(float) * LENGTH_LOOKUP;
lookup = (int*) malloc ( sizeArray );
cudaMalloc( &d_lookup, sizeArray );
cudaCheckErrors("cuda malloc fail");
// copy local variable to GPU
for ( i = 0; i < LENGTH_LOOKUP; ++i ) {
lookup[i] = 0;
    printf("%d ", lookup[i]);   // print the initial table values (inputAtoms was already freed above)
} printf("\n");
cudaMemcpy( d_lookup, lookup, sizeArray, cudaMemcpyHostToDevice );
cudaCheckErrors("cudaMemcpy 1 fail");
// run atomicAdd lookup table test kernel
atomicAddLookupTableKernel<<<1, LENGTH_LOOKUP-1>>>(d_lookup);
cudaDeviceSynchronize();
cudaCheckErrors("kernel fail");
// copy back result to local memory
cudaMemcpy( lookup, d_lookup, sizeArray, cudaMemcpyDeviceToHost );
cudaCheckErrors("cudaMemcpy 2 fail");
// report results and close
for ( i = 0; i < LENGTH_LOOKUP; ++i )
printf("%d ", lookup[i]);
printf("\n");
free( lookup );
cudaFree( d_lookup );
return 0;
}
|
8fd9eb29fec26b936042c87e831a95900e415e29.hip | // !!! This is a file automatically generated by hipify!!!
#include "./common.h"
#include <hip/hip_runtime.h>
#include <stdio.h>
/*
* This example demonstrates a simple vector sum on the GPU and on the host.
* sumArraysOnGPU splits the work of the vector sum across CUDA threads on the
* GPU. Only a single thread block is used in this small case, for simplicity.
* sumArraysOnHost sequentially iterates through vector elements on the host.
* This version of sumArrays adds host timers to measure GPU and CPU
* performance.
*/
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i],
gpuRef[i], i);
break;
}
}
if (match) printf("Arrays match.\n\n");
return;
}
void initialData(float *ip, int size)
{
// generate different seed for random number
time_t t;
srand((unsigned) time(&t));
for (int i = 0; i < size; i++)
{
ip[i] = (float)( rand() & 0xFF ) / 10.0f;
}
return;
}
void sumArraysOnHost(float *A, float *B, float *C, const int N)
{
for (int idx = 0; idx < N; idx++)
{
C[idx] = A[idx] + B[idx];
}
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) C[i] = A[i] + B[i];
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
hipDeviceProp_t deviceProp;
CHECK(hipGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(hipSetDevice(dev));
// set up data size of vectors
int nElem = 1 << 24;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
double iStart, iElaps;
// initialize data at host side
iStart = seconds();
initialData(h_A, nElem);
initialData(h_B, nElem);
iElaps = seconds() - iStart;
printf("initialData Time elapsed %f sec\n", iElaps);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
iStart = seconds();
sumArraysOnHost(h_A, h_B, hostRef, nElem);
iElaps = seconds() - iStart;
printf("sumArraysOnHost Time elapsed %f sec\n", iElaps);
// malloc device global memory
float *d_A, *d_B, *d_C;
CHECK(hipMalloc((float**)&d_A, nBytes));
CHECK(hipMalloc((float**)&d_B, nBytes));
CHECK(hipMalloc((float**)&d_C, nBytes));
// transfer data from host to device
CHECK(hipMemcpy(d_A, h_A, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_B, h_B, nBytes, hipMemcpyHostToDevice));
CHECK(hipMemcpy(d_C, gpuRef, nBytes, hipMemcpyHostToDevice));
// invoke kernel at host side
int iLen = 512;
dim3 block (iLen);
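    // ceiling division: round the grid size up so every element is covered by a thread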
dim3 grid ((nElem + block.x - 1) / block.x);
iStart = seconds();
hipLaunchKernelGGL(( sumArraysOnGPU), dim3(grid), dim3(block), 0, 0, d_A, d_B, d_C, nElem);
CHECK(hipDeviceSynchronize());
iElaps = seconds() - iStart;
printf("sumArraysOnGPU <<< %d, %d >>> Time elapsed %f sec\n", grid.x,
block.x, iElaps);
// check kernel error
CHECK(hipGetLastError()) ;
// copy kernel result back to host side
CHECK(hipMemcpy(gpuRef, d_C, nBytes, hipMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
CHECK(hipFree(d_A));
CHECK(hipFree(d_B));
CHECK(hipFree(d_C));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return(0);
}
| 8fd9eb29fec26b936042c87e831a95900e415e29.cu | #include "./common.h"
#include <cuda_runtime.h>
#include <stdio.h>
/*
* This example demonstrates a simple vector sum on the GPU and on the host.
* sumArraysOnGPU splits the work of the vector sum across CUDA threads on the
* GPU. Only a single thread block is used in this small case, for simplicity.
* sumArraysOnHost sequentially iterates through vector elements on the host.
* This version of sumArrays adds host timers to measure GPU and CPU
* performance.
*/
void checkResult(float *hostRef, float *gpuRef, const int N)
{
double epsilon = 1.0E-8;
bool match = 1;
for (int i = 0; i < N; i++)
{
if (abs(hostRef[i] - gpuRef[i]) > epsilon)
{
match = 0;
printf("Arrays do not match!\n");
printf("host %5.2f gpu %5.2f at current %d\n", hostRef[i],
gpuRef[i], i);
break;
}
}
if (match) printf("Arrays match.\n\n");
return;
}
void initialData(float *ip, int size)
{
// generate different seed for random number
time_t t;
srand((unsigned) time(&t));
for (int i = 0; i < size; i++)
{
ip[i] = (float)( rand() & 0xFF ) / 10.0f;
}
return;
}
void sumArraysOnHost(float *A, float *B, float *C, const int N)
{
for (int idx = 0; idx < N; idx++)
{
C[idx] = A[idx] + B[idx];
}
}
__global__ void sumArraysOnGPU(float *A, float *B, float *C, const int N)
{
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < N) C[i] = A[i] + B[i];
}
int main(int argc, char **argv)
{
printf("%s Starting...\n", argv[0]);
// set up device
int dev = 0;
cudaDeviceProp deviceProp;
CHECK(cudaGetDeviceProperties(&deviceProp, dev));
printf("Using Device %d: %s\n", dev, deviceProp.name);
CHECK(cudaSetDevice(dev));
// set up data size of vectors
int nElem = 1 << 24;
printf("Vector size %d\n", nElem);
// malloc host memory
size_t nBytes = nElem * sizeof(float);
float *h_A, *h_B, *hostRef, *gpuRef;
h_A = (float *)malloc(nBytes);
h_B = (float *)malloc(nBytes);
hostRef = (float *)malloc(nBytes);
gpuRef = (float *)malloc(nBytes);
double iStart, iElaps;
// initialize data at host side
iStart = seconds();
initialData(h_A, nElem);
initialData(h_B, nElem);
iElaps = seconds() - iStart;
printf("initialData Time elapsed %f sec\n", iElaps);
memset(hostRef, 0, nBytes);
memset(gpuRef, 0, nBytes);
// add vector at host side for result checks
iStart = seconds();
sumArraysOnHost(h_A, h_B, hostRef, nElem);
iElaps = seconds() - iStart;
printf("sumArraysOnHost Time elapsed %f sec\n", iElaps);
// malloc device global memory
float *d_A, *d_B, *d_C;
CHECK(cudaMalloc((float**)&d_A, nBytes));
CHECK(cudaMalloc((float**)&d_B, nBytes));
CHECK(cudaMalloc((float**)&d_C, nBytes));
// transfer data from host to device
CHECK(cudaMemcpy(d_A, h_A, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_B, h_B, nBytes, cudaMemcpyHostToDevice));
CHECK(cudaMemcpy(d_C, gpuRef, nBytes, cudaMemcpyHostToDevice));
// invoke kernel at host side
int iLen = 512;
dim3 block (iLen);
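    // ceiling division: round the grid size up so every element is covered by a thread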
dim3 grid ((nElem + block.x - 1) / block.x);
iStart = seconds();
sumArraysOnGPU<<<grid, block>>>(d_A, d_B, d_C, nElem);
CHECK(cudaDeviceSynchronize());
iElaps = seconds() - iStart;
printf("sumArraysOnGPU <<< %d, %d >>> Time elapsed %f sec\n", grid.x,
block.x, iElaps);
// check kernel error
CHECK(cudaGetLastError()) ;
// copy kernel result back to host side
CHECK(cudaMemcpy(gpuRef, d_C, nBytes, cudaMemcpyDeviceToHost));
// check device results
checkResult(hostRef, gpuRef, nElem);
// free device global memory
CHECK(cudaFree(d_A));
CHECK(cudaFree(d_B));
CHECK(cudaFree(d_C));
// free host memory
free(h_A);
free(h_B);
free(hostRef);
free(gpuRef);
return(0);
}
|
d542390175d337539f7e28f7f822f22d1c261fed.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <pcl/gpu/utils/device/funcattrib.hpp>
//#include <pcl/gpu/utils/device/block.hpp>
//#include <pcl/gpu/utils/device/warp.hpp>
namespace pcl
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Full Volume Scan6
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
struct FullScan6
{
PtrStep<short2> volume;
float3 cell_size;
mutable PtrSz<PointType> output;
__device__ __forceinline__ float
fetch (int x, int y, int z, int& weight) const
{
float tsdf;
unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x], tsdf, weight);
return tsdf;
}
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
if (__all_sync (__activemask (), x >= VOLUME_X)
|| __all_sync (__activemask (), y >= VOLUME_Y))
return;
float3 V;
V.x = (x + 0.5f) * cell_size.x;
V.y = (y + 0.5f) * cell_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < VOLUME_Z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < VOLUME_X && y < VOLUME_Y)
{
int W;
float F = fetch (x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * cell_size.z;
//process dx
if (x + 1 < VOLUME_X)
{
int Wn;
float Fn = fetch (x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + cell_size.x;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.x = (V.x * std::abs (Fn) + Vnx * std::abs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (x + 1 < VOLUME_X) */
//process dy
if (y + 1 < VOLUME_Y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + cell_size.y;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.y = (V.y * std::abs (Fn) + Vny * std::abs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (y + 1 < VOLUME_Y) */
//process dz
//if (z + 1 < VOLUME_Z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + cell_size.z;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.z = (V.z * std::abs (Fn) + Vnz * std::abs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (z + 1 < VOLUME_Z) */
} /* if (W != 0 && F != 1.f) */
} /* if (x < VOLUME_X && y < VOLUME_Y) */
int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
+ __popc (__ballot_sync (__activemask (), local_count > 1))
+ __popc (__ballot_sync (__activemask (), local_count > 2));
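          // warp-aggregated output: an exclusive warp scan gives each lane its offset and
          // lane 0 does a single atomicAdd to reserve space in the global output buffer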
if (total_warp > 0)
{
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
PointType *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
store_point_type (x, y, z, pos);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < VOLUME_Z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min ((int)output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
} /* operator() */
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float4* ptr) const {
*ptr = make_float4 (x, y, z, 0);
}
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float3* ptr) const {
*ptr = make_float3 (x, y, z);
}
};
__global__ void
extractKernel (const FullScan6 fs) {
fs ();
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
size_t
pcl::device::extractCloud (const PtrStep<short2>& volume, const float3& volume_size,
PtrSz<PointType> output)
{
FullScan6 fs;
fs.volume = volume;
fs.cell_size.x = volume_size.x / VOLUME_X;
fs.cell_size.y = volume_size.y / VOLUME_Y;
fs.cell_size.z = volume_size.z / VOLUME_Z;
fs.output = output;
dim3 block (CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
//hipFuncSetCacheConfig(extractKernel, hipFuncCachePreferL1);
//printFuncAttrib(extractKernel);
hipLaunchKernelGGL(( extractKernel), dim3(grid), dim3(block), 0, 0, fs);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
int size;
cudaSafeCall ( hipMemcpyFromSymbol (&size, output_count, sizeof(size)) );
return (std::size_t)size;
}
namespace pcl
{
namespace device
{
template<typename NormalType>
struct ExtractNormals
{
float3 cell_size;
PtrStep<short2> volume;
PtrSz<PointType> points;
mutable NormalType* output;
__device__ __forceinline__ float
readTsdf (int x, int y, int z) const
{
return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]);
}
__device__ __forceinline__ float3
fetchPoint (int idx) const
{
PointType p = points.data[idx];
return make_float3 (p.x, p.y, p.z);
}
__device__ __forceinline__ void
storeNormal (int idx, float3 normal) const
{
NormalType n;
n.x = normal.x; n.y = normal.y; n.z = normal.z;
output[idx] = n;
}
__device__ __forceinline__ int3
getVoxel (const float3& point) const
{
int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity
int vy = __float2int_rd (point.y / cell_size.y);
int vz = __float2int_rd (point.z / cell_size.z);
return make_int3 (vx, vy, vz);
}
__device__ __forceinline__ void
operator () () const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
constexpr float qnan = std::numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = fetchPoint (idx);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2)
{
float3 t;
t = point;
t.x += cell_size.x;
float Fx1 = interpolateTrilineary (t);
t = point;
t.x -= cell_size.x;
float Fx2 = interpolateTrilineary (t);
n.x = (Fx1 - Fx2);
t = point;
t.y += cell_size.y;
float Fy1 = interpolateTrilineary (t);
t = point;
t.y -= cell_size.y;
float Fy2 = interpolateTrilineary (t);
n.y = (Fy1 - Fy2);
t = point;
t.z += cell_size.z;
float Fz1 = interpolateTrilineary (t);
t = point;
t.z -= cell_size.z;
float Fz2 = interpolateTrilineary (t);
n.z = (Fz1 - Fz2);
n = normalized (n);
}
storeNormal (idx, n);
}
__device__ __forceinline__ float
interpolateTrilineary (const float3& point) const
{
int3 g = getVoxel (point);
float vx = (g.x + 0.5f) * cell_size.x;
float vy = (g.y + 0.5f) * cell_size.y;
float vz = (g.z + 0.5f) * cell_size.z;
g.x = (point.x < vx) ? (g.x - 1) : g.x;
g.y = (point.y < vy) ? (g.y - 1) : g.y;
g.z = (point.z < vz) ? (g.z - 1) : g.z;
float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x;
float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y;
float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z;
float res = readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - a) * (1 - b) * (1 - c) +
readTsdf (g.x + 0, g.y + 0, g.z + 1) * (1 - a) * (1 - b) * c +
readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - a) * b * (1 - c) +
readTsdf (g.x + 0, g.y + 1, g.z + 1) * (1 - a) * b * c +
readTsdf (g.x + 1, g.y + 0, g.z + 0) * a * (1 - b) * (1 - c) +
readTsdf (g.x + 1, g.y + 0, g.z + 1) * a * (1 - b) * c +
readTsdf (g.x + 1, g.y + 1, g.z + 0) * a * b * (1 - c) +
readTsdf (g.x + 1, g.y + 1, g.z + 1) * a * b * c;
return res;
}
};
template<typename NormalType>
__global__ void
extractNormalsKernel (const ExtractNormals<NormalType> en) {
en ();
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename NormalType> void
pcl::device::extractNormals (const PtrStep<short2>& volume, const float3& volume_size,
const PtrSz<PointType>& points, NormalType* output)
{
ExtractNormals<NormalType> en;
en.volume = volume;
en.cell_size.x = volume_size.x / VOLUME_X;
en.cell_size.y = volume_size.y / VOLUME_Y;
en.cell_size.z = volume_size.z / VOLUME_Z;
en.points = points;
en.output = output;
dim3 block (256);
dim3 grid (divUp (points.size, block.x));
hipLaunchKernelGGL(( extractNormalsKernel), dim3(grid), dim3(block), 0, 0, en);
cudaSafeCall ( hipGetLastError () );
cudaSafeCall (hipDeviceSynchronize ());
}
using namespace pcl::device;
template void pcl::device::extractNormals<PointType>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, PointType * output);
template void pcl::device::extractNormals<float8>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, float8 * output);
| d542390175d337539f7e28f7f822f22d1c261fed.cu | /*
* Software License Agreement (BSD License)
*
* Point Cloud Library (PCL) - www.pointclouds.org
* Copyright (c) 2011, Willow Garage, Inc.
*
* All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
*
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above
* copyright notice, this list of conditions and the following
* disclaimer in the documentation and/or other materials provided
* with the distribution.
* * Neither the name of Willow Garage, Inc. nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
* "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
* LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
* FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
* COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
* INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
* BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
* LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
* ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*
*/
#include "device.hpp"
//#include <pcl/gpu/utils/device/funcattrib.hpp>
//#include <pcl/gpu/utils/device/block.hpp>
//#include <pcl/gpu/utils/device/warp.hpp>
namespace pcl
{
namespace device
{
////////////////////////////////////////////////////////////////////////////////////////
///// Full Volume Scan6
enum
{
CTA_SIZE_X = 32,
CTA_SIZE_Y = 6,
CTA_SIZE = CTA_SIZE_X * CTA_SIZE_Y,
MAX_LOCAL_POINTS = 3
};
__device__ int global_count = 0;
__device__ int output_count;
__device__ unsigned int blocks_done = 0;
__shared__ float storage_X[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Y[CTA_SIZE * MAX_LOCAL_POINTS];
__shared__ float storage_Z[CTA_SIZE * MAX_LOCAL_POINTS];
struct FullScan6
{
PtrStep<short2> volume;
float3 cell_size;
mutable PtrSz<PointType> output;
__device__ __forceinline__ float
fetch (int x, int y, int z, int& weight) const
{
float tsdf;
unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x], tsdf, weight);
return tsdf;
}
__device__ __forceinline__ void
operator () () const
{
int x = threadIdx.x + blockIdx.x * CTA_SIZE_X;
int y = threadIdx.y + blockIdx.y * CTA_SIZE_Y;
if (__all_sync (__activemask (), x >= VOLUME_X)
|| __all_sync (__activemask (), y >= VOLUME_Y))
return;
float3 V;
V.x = (x + 0.5f) * cell_size.x;
V.y = (y + 0.5f) * cell_size.y;
int ftid = Block::flattenedThreadId ();
for (int z = 0; z < VOLUME_Z - 1; ++z)
{
float3 points[MAX_LOCAL_POINTS];
int local_count = 0;
if (x < VOLUME_X && y < VOLUME_Y)
{
int W;
float F = fetch (x, y, z, W);
if (W != 0 && F != 1.f)
{
V.z = (z + 0.5f) * cell_size.z;
//process dx
if (x + 1 < VOLUME_X)
{
int Wn;
float Fn = fetch (x + 1, y, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.y = V.y;
p.z = V.z;
float Vnx = V.x + cell_size.x;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.x = (V.x * std::abs (Fn) + Vnx * std::abs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (x + 1 < VOLUME_X) */
//process dy
if (y + 1 < VOLUME_Y)
{
int Wn;
float Fn = fetch (x, y + 1, z, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.z = V.z;
float Vny = V.y + cell_size.y;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.y = (V.y * std::abs (Fn) + Vny * std::abs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (y + 1 < VOLUME_Y) */
//process dz
//if (z + 1 < VOLUME_Z) // guaranteed by loop
{
int Wn;
float Fn = fetch (x, y, z + 1, Wn);
if (Wn != 0 && Fn != 1.f)
if ((F > 0 && Fn < 0) || (F < 0 && Fn > 0))
{
float3 p;
p.x = V.x;
p.y = V.y;
float Vnz = V.z + cell_size.z;
float d_inv = 1.f / (std::abs (F) + std::abs (Fn));
p.z = (V.z * std::abs (Fn) + Vnz * std::abs (F)) * d_inv;
points[local_count++] = p;
}
} /* if (z + 1 < VOLUME_Z) */
} /* if (W != 0 && F != 1.f) */
} /* if (x < VOLUME_X && y < VOLUME_Y) */
int total_warp = __popc (__ballot_sync (__activemask (), local_count > 0))
+ __popc (__ballot_sync (__activemask (), local_count > 1))
+ __popc (__ballot_sync (__activemask (), local_count > 2));
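          // warp-aggregated output: an exclusive warp scan gives each lane its offset and
          // lane 0 does a single atomicAdd to reserve space in the global output buffer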
if (total_warp > 0)
{
int lane = Warp::laneId ();
int storage_index = (ftid >> Warp::LOG_WARP_SIZE) * Warp::WARP_SIZE * MAX_LOCAL_POINTS;
volatile int* cta_buffer = (int*)(storage_X + storage_index);
cta_buffer[lane] = local_count;
int offset = scan_warp<exclusive>(cta_buffer, lane);
if (lane == 0)
{
int old_global_count = atomicAdd (&global_count, total_warp);
cta_buffer[0] = old_global_count;
}
int old_global_count = cta_buffer[0];
for (int l = 0; l < local_count; ++l)
{
storage_X[storage_index + offset + l] = points[l].x;
storage_Y[storage_index + offset + l] = points[l].y;
storage_Z[storage_index + offset + l] = points[l].z;
}
PointType *pos = output.data + old_global_count + lane;
for (int idx = lane; idx < total_warp; idx += Warp::STRIDE, pos += Warp::STRIDE)
{
float x = storage_X[storage_index + idx];
float y = storage_Y[storage_index + idx];
float z = storage_Z[storage_index + idx];
store_point_type (x, y, z, pos);
}
bool full = (old_global_count + total_warp) >= output.size;
if (full)
break;
}
} /* for(int z = 0; z < VOLUME_Z - 1; ++z) */
///////////////////////////
// prepare for future scans
if (ftid == 0)
{
unsigned int total_blocks = gridDim.x * gridDim.y * gridDim.z;
unsigned int value = atomicInc (&blocks_done, total_blocks);
//last block
if (value == total_blocks - 1)
{
output_count = min ((int)output.size, global_count);
blocks_done = 0;
global_count = 0;
}
}
} /* operator() */
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float4* ptr) const {
*ptr = make_float4 (x, y, z, 0);
}
__device__ __forceinline__ void
store_point_type (float x, float y, float z, float3* ptr) const {
*ptr = make_float3 (x, y, z);
}
};
__global__ void
extractKernel (const FullScan6 fs) {
fs ();
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
size_t
pcl::device::extractCloud (const PtrStep<short2>& volume, const float3& volume_size,
PtrSz<PointType> output)
{
FullScan6 fs;
fs.volume = volume;
fs.cell_size.x = volume_size.x / VOLUME_X;
fs.cell_size.y = volume_size.y / VOLUME_Y;
fs.cell_size.z = volume_size.z / VOLUME_Z;
fs.output = output;
dim3 block (CTA_SIZE_X, CTA_SIZE_Y);
dim3 grid (divUp (VOLUME_X, block.x), divUp (VOLUME_Y, block.y));
//cudaFuncSetCacheConfig(extractKernel, cudaFuncCachePreferL1);
//printFuncAttrib(extractKernel);
extractKernel<<<grid, block>>>(fs);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
int size;
cudaSafeCall ( cudaMemcpyFromSymbol (&size, output_count, sizeof(size)) );
return (std::size_t)size;
}
namespace pcl
{
namespace device
{
template<typename NormalType>
struct ExtractNormals
{
float3 cell_size;
PtrStep<short2> volume;
PtrSz<PointType> points;
mutable NormalType* output;
__device__ __forceinline__ float
readTsdf (int x, int y, int z) const
{
return unpack_tsdf (volume.ptr (VOLUME_Y * z + y)[x]);
}
__device__ __forceinline__ float3
fetchPoint (int idx) const
{
PointType p = points.data[idx];
return make_float3 (p.x, p.y, p.z);
}
__device__ __forceinline__ void
storeNormal (int idx, float3 normal) const
{
NormalType n;
n.x = normal.x; n.y = normal.y; n.z = normal.z;
output[idx] = n;
}
__device__ __forceinline__ int3
getVoxel (const float3& point) const
{
int vx = __float2int_rd (point.x / cell_size.x); // round to negative infinity
int vy = __float2int_rd (point.y / cell_size.y);
int vz = __float2int_rd (point.z / cell_size.z);
return make_int3 (vx, vy, vz);
}
__device__ __forceinline__ void
operator () () const
{
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx >= points.size)
return;
constexpr float qnan = std::numeric_limits<float>::quiet_NaN ();
float3 n = make_float3 (qnan, qnan, qnan);
float3 point = fetchPoint (idx);
int3 g = getVoxel (point);
if (g.x > 1 && g.y > 1 && g.z > 1 && g.x < VOLUME_X - 2 && g.y < VOLUME_Y - 2 && g.z < VOLUME_Z - 2)
{
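        // Estimate the TSDF gradient at the point by central differences (trilinear
        // samples one cell apart along each axis); the normalized gradient serves as
        // the surface normal.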
float3 t;
t = point;
t.x += cell_size.x;
float Fx1 = interpolateTrilineary (t);
t = point;
t.x -= cell_size.x;
float Fx2 = interpolateTrilineary (t);
n.x = (Fx1 - Fx2);
t = point;
t.y += cell_size.y;
float Fy1 = interpolateTrilineary (t);
t = point;
t.y -= cell_size.y;
float Fy2 = interpolateTrilineary (t);
n.y = (Fy1 - Fy2);
t = point;
t.z += cell_size.z;
float Fz1 = interpolateTrilineary (t);
t = point;
t.z -= cell_size.z;
float Fz2 = interpolateTrilineary (t);
n.z = (Fz1 - Fz2);
n = normalized (n);
}
storeNormal (idx, n);
}
__device__ __forceinline__ float
interpolateTrilineary (const float3& point) const
{
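      // Trilinear interpolation of the TSDF: shift to the voxel whose center lies at
      // or below the query point on each axis, compute the fractional offsets a, b, c
      // within that cell, and blend the eight surrounding voxel values.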
int3 g = getVoxel (point);
float vx = (g.x + 0.5f) * cell_size.x;
float vy = (g.y + 0.5f) * cell_size.y;
float vz = (g.z + 0.5f) * cell_size.z;
g.x = (point.x < vx) ? (g.x - 1) : g.x;
g.y = (point.y < vy) ? (g.y - 1) : g.y;
g.z = (point.z < vz) ? (g.z - 1) : g.z;
float a = (point.x - (g.x + 0.5f) * cell_size.x) / cell_size.x;
float b = (point.y - (g.y + 0.5f) * cell_size.y) / cell_size.y;
float c = (point.z - (g.z + 0.5f) * cell_size.z) / cell_size.z;
float res = readTsdf (g.x + 0, g.y + 0, g.z + 0) * (1 - a) * (1 - b) * (1 - c) +
readTsdf (g.x + 0, g.y + 0, g.z + 1) * (1 - a) * (1 - b) * c +
readTsdf (g.x + 0, g.y + 1, g.z + 0) * (1 - a) * b * (1 - c) +
readTsdf (g.x + 0, g.y + 1, g.z + 1) * (1 - a) * b * c +
readTsdf (g.x + 1, g.y + 0, g.z + 0) * a * (1 - b) * (1 - c) +
readTsdf (g.x + 1, g.y + 0, g.z + 1) * a * (1 - b) * c +
readTsdf (g.x + 1, g.y + 1, g.z + 0) * a * b * (1 - c) +
readTsdf (g.x + 1, g.y + 1, g.z + 1) * a * b * c;
return res;
}
};
template<typename NormalType>
__global__ void
extractNormalsKernel (const ExtractNormals<NormalType> en) {
en ();
}
}
}
//////////////////////////////////////////////////////////////////////////////////////////////////////////////////
template<typename NormalType> void
pcl::device::extractNormals (const PtrStep<short2>& volume, const float3& volume_size,
const PtrSz<PointType>& points, NormalType* output)
{
ExtractNormals<NormalType> en;
en.volume = volume;
en.cell_size.x = volume_size.x / VOLUME_X;
en.cell_size.y = volume_size.y / VOLUME_Y;
en.cell_size.z = volume_size.z / VOLUME_Z;
en.points = points;
en.output = output;
dim3 block (256);
dim3 grid (divUp (points.size, block.x));
extractNormalsKernel<<<grid, block>>>(en);
cudaSafeCall ( cudaGetLastError () );
cudaSafeCall (cudaDeviceSynchronize ());
}
using namespace pcl::device;
template void pcl::device::extractNormals<PointType>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, PointType * output);
template void pcl::device::extractNormals<float8>(const PtrStep<short2>&volume, const float3 &volume_size, const PtrSz<PointType>&input, float8 * output);
|
a9c64c6236528aeee211b35febdebf7b06427396.hip | // !!! This is a file automatically generated by hipify!!!
/******************************************************************************
*
* (C) Copyright 2010 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernel.hip"
#include "support.h"
int main (int argc, char *argv[])
{
Timer timer;
hipError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
float *A_h, *B_h, *C_h;
float *A_d, *B_d, *C_d;
size_t A_sz, B_sz, C_sz;
unsigned matArow, matAcol;
unsigned matBrow, matBcol;
dim3 dim_grid, dim_block;
if (argc == 1) {
matArow = 1000;
matAcol = matBrow = 1000;
matBcol = 1000;
} else if (argc == 2) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[1]);
matBcol = atoi(argv[1]);
} else if (argc == 4) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[2]);
matBcol = atoi(argv[3]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./sgemm-tiled # All matrices are 1000 x 1000"
"\n Usage: ./sgemm-tiled <m> # All matrices are m x m"
"\n Usage: ./sgemm-tiled <m> <k> <n> # A: m x k, B: k x n, C: m x n"
"\n");
exit(0);
}
// srand(217);
A_sz = matArow*matAcol;
B_sz = matBrow*matBcol;
C_sz = matArow*matBcol;
A_h = (float*) malloc( sizeof(float)*A_sz );
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
B_h = (float*) malloc( sizeof(float)*B_sz );
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
C_h = (float*) malloc( sizeof(float)*C_sz );
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", matArow, matAcol,
matBrow, matBcol, matArow, matBcol);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipMalloc((float**) &A_d, sizeof(float) * A_sz);
hipMalloc((float**) &B_d, sizeof(float) * B_sz);
hipMalloc((float**) &C_d, sizeof(float) * C_sz);
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipMemcpy(A_d, A_h, sizeof(float) * A_sz, hipMemcpyHostToDevice);
hipMemcpy(B_d, B_h, sizeof(float) * B_sz, hipMemcpyHostToDevice);
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel using standard sgemm interface ---------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
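    // Assuming basicSgemm follows the standard column-major sgemm argument order
    // (transa, transb, m, n, k, alpha, A, lda, B, ldb, beta, C, ldc), this computes
    // C = 1.0 * A * B + 0.0 * C.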
basicSgemm('N', 'N', matArow, matBcol, matBrow, 1.0f, \
A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow);
cuda_ret = hipDeviceSynchronize();
if(cuda_ret != hipSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables to host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
hipMemcpy(C_h, C_d, sizeof(float) * C_sz, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, matArow, matAcol, matBcol);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
hipFree(A_d);
hipFree(B_d);
hipFree(C_d);
return 0;
}
| a9c64c6236528aeee211b35febdebf7b06427396.cu | /******************************************************************************
*
* (C) Copyright 2010 The Board of Trustees of the
* University of Illinois
* All Rights Reserved
*
******************************************************************************/
#include <stdio.h>
#include <stdlib.h>
#include "kernel.cu"
#include "support.h"
int main (int argc, char *argv[])
{
Timer timer;
cudaError_t cuda_ret;
// Initialize host variables ----------------------------------------------
printf("\nSetting up the problem..."); fflush(stdout);
startTime(&timer);
float *A_h, *B_h, *C_h;
float *A_d, *B_d, *C_d;
size_t A_sz, B_sz, C_sz;
unsigned matArow, matAcol;
unsigned matBrow, matBcol;
dim3 dim_grid, dim_block;
if (argc == 1) {
matArow = 1000;
matAcol = matBrow = 1000;
matBcol = 1000;
} else if (argc == 2) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[1]);
matBcol = atoi(argv[1]);
} else if (argc == 4) {
matArow = atoi(argv[1]);
matAcol = matBrow = atoi(argv[2]);
matBcol = atoi(argv[3]);
} else {
printf("\n Invalid input parameters!"
"\n Usage: ./sgemm-tiled # All matrices are 1000 x 1000"
"\n Usage: ./sgemm-tiled <m> # All matrices are m x m"
"\n Usage: ./sgemm-tiled <m> <k> <n> # A: m x k, B: k x n, C: m x n"
"\n");
exit(0);
}
// srand(217);
A_sz = matArow*matAcol;
B_sz = matBrow*matBcol;
C_sz = matArow*matBcol;
A_h = (float*) malloc( sizeof(float)*A_sz );
for (unsigned int i=0; i < A_sz; i++) { A_h[i] = (rand()%100)/100.00; }
B_h = (float*) malloc( sizeof(float)*B_sz );
for (unsigned int i=0; i < B_sz; i++) { B_h[i] = (rand()%100)/100.00; }
C_h = (float*) malloc( sizeof(float)*C_sz );
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
printf(" A: %u x %u\n B: %u x %u\n C: %u x %u\n", matArow, matAcol,
matBrow, matBcol, matArow, matBcol);
// Allocate device variables ----------------------------------------------
printf("Allocating device variables..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaMalloc((float**) &A_d, sizeof(float) * A_sz);
cudaMalloc((float**) &B_d, sizeof(float) * B_sz);
cudaMalloc((float**) &C_d, sizeof(float) * C_sz);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy host variables to device ------------------------------------------
printf("Copying data from host to device..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaMemcpy(A_d, A_h, sizeof(float) * A_sz, cudaMemcpyHostToDevice);
cudaMemcpy(B_d, B_h, sizeof(float) * B_sz, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Launch kernel using standard sgemm interface ---------------------------
printf("Launching kernel..."); fflush(stdout);
startTime(&timer);
basicSgemm('N', 'N', matArow, matBcol, matBrow, 1.0f, \
A_d, matArow, B_d, matBrow, 0.0f, C_d, matBrow);
cuda_ret = cudaDeviceSynchronize();
if(cuda_ret != cudaSuccess) FATAL("Unable to launch kernel");
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Copy device variables to host ----------------------------------------
printf("Copying data from device to host..."); fflush(stdout);
startTime(&timer);
//INSERT CODE HERE
cudaMemcpy(C_h, C_d, sizeof(float) * C_sz, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
stopTime(&timer); printf("%f s\n", elapsedTime(timer));
// Verify correctness -----------------------------------------------------
printf("Verifying results..."); fflush(stdout);
verify(A_h, B_h, C_h, matArow, matAcol, matBcol);
// Free memory ------------------------------------------------------------
free(A_h);
free(B_h);
free(C_h);
//INSERT CODE HERE
cudaFree(A_d);
cudaFree(B_d);
cudaFree(C_d);
return 0;
}
|
232f006fe95c4ab21d41f7727077142c29bfebb8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#ifndef JOIN_MATCHING_BLOCKS_CU
#define JOIN_MATCHING_BLOCKS_CU
#define NUM_DELTA_PER_BLOCK 8
#define SMJ_NUM_THREADS_PER_BLOCK 512
#ifdef BINARY_SEARCH
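// Lower-bound style binary search over one sorted chunk of SMJ_NUM_THREADS_PER_BLOCK
// records: returns the index of the first record whose key (y) is >= key.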
__device__
int findNumResultInChunk(Record records[], int key)
{
int min = 0;
int max = SMJ_NUM_THREADS_PER_BLOCK;
int mid;
int cut;
while(max - min > 1) {
mid = (min + max) / 2;
cut = records[mid].y;
if(key > cut)
min = mid;
else
max = mid;
}
if(records[min].y >= key)
return min;
else
return max;
}
#else
__device__
int findNumResultInChunk(Record records[], int key)
{
int numResult=0;
for(int i=0;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
if(records[i].y==key)
numResult++;
else
if(records[i].y>key)
break;
return numResult;
}
#endif
//the best, with shared memory, with coalesced access
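// Count phase of the merge step: each block caches one sorted chunk of R
// (SMJ_NUM_THREADS_PER_BLOCK tuples) in shared memory, streams the matching
// quantile range of S with coalesced loads, and writes the per-thread match
// count to d_n for the subsequent prefix sum.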
__global__ void
joinMBCount_kernel(Record *d_R, int rLen, Record* d_S, int sLen,
int *d_quanLocS, int numQuan, int *d_n)
{
__shared__ Record tempBuf_R[SMJ_NUM_THREADS_PER_BLOCK];
__shared__ int sStart;
__shared__ int sEnd;
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int numResult=0;
if(bid<numQuan)
{
if(resultID<rLen)
tempBuf_R[tid]=d_R[resultID];
else
tempBuf_R[tid].y=TEST_MAX;
if(tid==0)
{
sStart=d_quanLocS[bid<<1];
sEnd=d_quanLocS[(bid<<1)+1];
}
__syncthreads();
int pos=0;
Record tempValue;
int i=0;
int startPos=0;
for(pos=sStart;(pos+tid)<sEnd;pos+=SMJ_NUM_THREADS_PER_BLOCK)
{
tempValue=d_S[pos+tid];
//for(i=0;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
// if(tempValue.y==tempBuf_R[i].y)
// numResult++;
//numResult+=findNumResultInChunk_seq(tempBuf_R,tempValue.y);
startPos=findNumResultInChunk(tempBuf_R,tempValue.y);
for(i=startPos;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
if(tempBuf_R[i].y==tempValue.y)
numResult++;
else
if(tempBuf_R[i].y>tempValue.y)
break;
}
}
else
numResult=0;
d_n[resultID]=numResult;
}
#ifndef SHARED_MEM
__global__ void joinMBCount_kernel_noShared(Record* d_tempBuf_R, Record *d_R, int rLen, Record* d_S, int sLen,
int *d_quanLocS, int numQuan, int *d_n)
{
//__shared__ Record tempBuf_R[SMJ_NUM_THREADS_PER_BLOCK];
Record* tempBuf_R;
tempBuf_R = d_tempBuf_R + blockIdx.x*SMJ_NUM_THREADS_PER_BLOCK;
__shared__ int sStart;
__shared__ int sEnd;
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int numResult=0;
if(bid<numQuan)
{
if(resultID<rLen)
tempBuf_R[tid]=d_R[resultID];
else
tempBuf_R[tid].y=TEST_MAX;
if(tid==0)
{
sStart=d_quanLocS[bid<<1];
sEnd=d_quanLocS[(bid<<1)+1];
}
__syncthreads();
int pos=0;
Record tempValue;
int i=0;
int startPos=0;
for(pos=sStart;(pos+tid)<sEnd;pos+=SMJ_NUM_THREADS_PER_BLOCK)
{
tempValue=d_S[pos+tid];
//for(i=0;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
// if(tempValue.y==tempBuf_R[i].y)
// numResult++;
//numResult+=findNumResultInChunk_seq(tempBuf_R,tempValue.y);
startPos=findNumResultInChunk(tempBuf_R,tempValue.y);
for(i=startPos;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
if(tempBuf_R[i].y==tempValue.y)
numResult++;
else
if(tempBuf_R[i].y>tempValue.y)
break;
}
}
else
numResult=0;
d_n[resultID]=numResult;
}
#endif
#ifndef COALESCED
__global__ void
joinMBCount_noCoalesced_kernel(Record *d_R, int rLen, Record* d_S, int sLen,
int *d_quanLocS, int numQuan, int *d_n)
{
__shared__ Record tempBuf_R[SMJ_NUM_THREADS_PER_BLOCK];
__shared__ int sStart;
__shared__ int sEnd;
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int numResult=0;
if(bid<numQuan)
{
if(resultID<rLen)
tempBuf_R[tid]=d_R[resultID];
else
tempBuf_R[tid].y=TEST_MAX;
if(tid==0)
{
sStart=d_quanLocS[bid<<1];
sEnd=d_quanLocS[(bid<<1)+1];
}
__syncthreads();
int pos=0;
Record tempValue;
int i=0;
int startPos=0;
int len = (sEnd - sStart)/numThread;
int start = sStart + len*tid;
int end = start + len;
if( tid == (numThread - 1) )
{
end = sEnd;
}
//for(pos=sStart + tid; pos < sEnd;pos+=SMJ_NUM_THREADS_PER_BLOCK)
for(pos=start; pos < end;pos++)
{
tempValue=d_S[pos];
startPos=findNumResultInChunk(tempBuf_R,tempValue.y);
for(i=startPos;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
if(tempBuf_R[i].y==tempValue.y)
numResult++;
else
if(tempBuf_R[i].y>tempValue.y)
break;
}
}
else
numResult=0;
d_n[resultID]=numResult;
}
#endif
//best, with shared memory, with coalesced
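// Write phase: same traversal as the count kernel, but each thread starts writing
// its join results at offset d_sum[resultID] (the exclusive prefix sum of the
// counts), so the output is materialized without extra atomics.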
__global__ void
joinMBWrite_kernel(Record *d_R, int rLen, Record* d_S, int sLen,
int *d_quanLocS, int numQuan, int *d_sum, Record *d_output)
{
__shared__ Record tempBuf_R[SMJ_NUM_THREADS_PER_BLOCK];
__shared__ int sStart;
__shared__ int sEnd;
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
//int numResult=0;
if(bid<numQuan)
{
if(resultID<rLen)
tempBuf_R[tid]=d_R[resultID];
else
tempBuf_R[tid].y=TEST_MAX;
if(tid==0)
{
sStart=d_quanLocS[bid<<1];
sEnd=d_quanLocS[(bid<<1)+1];
}
__syncthreads();
int pos=0;
Record tempValue;
int i=0;
int startPos=0;
int base=d_sum[resultID];
for(pos=sStart;(pos+tid)<sEnd;pos+=SMJ_NUM_THREADS_PER_BLOCK)
{
tempValue=d_S[pos+tid];
//for(i=0;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
// if(tempValue.y==tempBuf_R[i].y)
// numResult++;
//numResult+=findNumResultInChunk_seq(tempBuf_R,tempValue.y);
startPos=findNumResultInChunk(tempBuf_R,tempValue.y);
for(i=startPos;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
if(tempBuf_R[i].y==tempValue.y)
{
d_output[base].x=tempBuf_R[i].x;
d_output[base].y=tempValue.x;
base++;
}
else
if(tempBuf_R[i].y>tempValue.y)
break;
}
}
}
#ifndef COALESCED
__global__ void
joinMBWrite_noCoalesced_kernel(Record *d_R, int rLen, Record* d_S, int sLen,
int *d_quanLocS, int numQuan, int *d_sum, Record *d_output)
{
__shared__ Record tempBuf_R[SMJ_NUM_THREADS_PER_BLOCK];
__shared__ int sStart;
__shared__ int sEnd;
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
//int numResult=0;
if(bid<numQuan)
{
if(resultID<rLen)
tempBuf_R[tid]=d_R[resultID];
else
tempBuf_R[tid].y=TEST_MAX;
if(tid==0)
{
sStart=d_quanLocS[bid<<1];
sEnd=d_quanLocS[(bid<<1)+1];
}
__syncthreads();
int pos=0;
Record tempValue;
int i=0;
int startPos=0;
int base=d_sum[resultID];
int len = (sEnd - sStart)/numThread;
int start = sStart + len*tid;
int end = start + len;
if( tid == (numThread - 1) )
{
end = sEnd;
}
for(pos = start; pos < end; pos++ )
{
tempValue=d_S[pos];
startPos=findNumResultInChunk(tempBuf_R,tempValue.y);
for(i=startPos;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
if(tempBuf_R[i].y==tempValue.y)
{
d_output[base].x=tempBuf_R[i].x;
d_output[base].y=tempValue.x;
base++;
}
else
if(tempBuf_R[i].y>tempValue.y)
break;
}
}
}
#endif
#ifndef SHARED_MEM
__global__ void
joinMBWrite_kernel_noShared(Record* d_tempBuf_R, Record *d_R, int rLen, Record* d_S, int sLen,
int *d_quanLocS, int numQuan, int *d_sum, Record *d_output)
{
//__shared__ Record tempBuf_R[SMJ_NUM_THREADS_PER_BLOCK];
Record* tempBuf_R;
tempBuf_R = d_tempBuf_R + blockIdx.x*SMJ_NUM_THREADS_PER_BLOCK;
__shared__ int sStart;
__shared__ int sEnd;
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
//int numResult=0;
if(bid<numQuan)
{
if(resultID<rLen)
tempBuf_R[tid]=d_R[resultID];
else
tempBuf_R[tid].y=TEST_MAX;
if(tid==0)
{
sStart=d_quanLocS[bid<<1];
sEnd=d_quanLocS[(bid<<1)+1];
}
__syncthreads();
int pos=0;
Record tempValue;
int i=0;
int startPos=0;
int base=d_sum[resultID];
for(pos=sStart;(pos+tid)<sEnd;pos+=SMJ_NUM_THREADS_PER_BLOCK)
{
tempValue=d_S[pos+tid];
//for(i=0;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
// if(tempValue.y==tempBuf_R[i].y)
// numResult++;
//numResult+=findNumResultInChunk_seq(tempBuf_R,tempValue.y);
startPos=findNumResultInChunk(tempBuf_R,tempValue.y);
for(i=startPos;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
if(tempBuf_R[i].y==tempValue.y)
{
d_output[base].x=tempBuf_R[i].x;
d_output[base].y=tempValue.x;
base++;
}
else
if(tempBuf_R[i].y>tempValue.y)
break;
}
}
}
#endif
int joinMatchingBlocks(Record *d_R, int rLen, Record *d_S, int sLen,
int *d_quanLocS, int numQuan, Record** d_Rout)
{
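	// Two-pass output scheme: (1) the count kernel records per-thread result sizes in
	// d_n, (2) an exclusive scan of d_n into d_sum yields the write offsets and the
	// total result size, (3) the output buffer is allocated and the write kernel is
	// launched with those offsets.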
int numResults=0;
int numThreadPerBlock =SMJ_NUM_THREADS_PER_BLOCK;
int numBlock_X=numQuan;
int numBlock_Y=1;
if(numBlock_X>NLJ_MAX_NUM_BLOCK_PER_DIM)
{
numBlock_Y=numBlock_X/NLJ_MAX_NUM_BLOCK_PER_DIM;
if(numBlock_X%NLJ_MAX_NUM_BLOCK_PER_DIM!=0)
numBlock_Y++;
numBlock_X=NLJ_MAX_NUM_BLOCK_PER_DIM;
}
dim3 threads_NLJ( numThreadPerBlock, 1, 1);
dim3 grid_NLJ( numBlock_X, numBlock_Y, 1);
int resultBuf=grid_NLJ.x*grid_NLJ.y*numThreadPerBlock;
printf("numThreadPerBlock,%d, numBlock_X, %d, numBlock_Y, %d\n", numThreadPerBlock, numBlock_X,numBlock_Y);
int* d_n;
GPUMALLOC((void**)&d_n, sizeof(int)*resultBuf );
//the prefix sum for d_n
int *d_sum;//the prefix sum for d_n[1,...,n]
GPUMALLOC((void**)&d_sum, sizeof(int)*resultBuf );
int* h_n ;
CPUMALLOC((void**)&h_n, sizeof(int));
int* h_sum ;
CPUMALLOC((void**)&h_sum, sizeof(int));
unsigned int timer=0;
//saven_initialPrefixSum(resultBuf);
startTimer(&timer);
#ifdef SHARED_MEM
printf( "YES, SHARED MEMORY, joinMBCount \n" );
#ifdef COALESCED
printf( "YES, COALESCED, joinMBCount\n" );
hipLaunchKernelGGL(( joinMBCount_kernel), dim3(grid_NLJ), dim3(threads_NLJ) , 0, 0, d_R, rLen, d_S, sLen,
d_quanLocS, numQuan, d_n);
#else
printf( "NO COALESCED, joinMBCount\n" );
hipLaunchKernelGGL(( joinMBCount_noCoalesced_kernel), dim3(grid_NLJ), dim3(threads_NLJ) , 0, 0, d_R, rLen, d_S, sLen,
d_quanLocS, numQuan, d_n);
#endif
#else
printf( "NO SHARED MEMORY, jonMBCount \n" );
Record* d_tempBuf_R;
GPUMALLOC( (void**)&d_tempBuf_R, sizeof(Record)*SMJ_NUM_THREADS_PER_BLOCK*grid_NLJ.x );
hipLaunchKernelGGL(( joinMBCount_kernel_noShared), dim3(grid_NLJ), dim3(threads_NLJ) , 0, 0, d_tempBuf_R, d_R, rLen, d_S, sLen,
d_quanLocS, numQuan, d_n);
GPUFREE( d_tempBuf_R );
#endif
CUDA_SAFE_CALL(hipDeviceSynchronize());
endTimer("joinMBCount_kernel", &timer);
//gpuPrint(d_n,512,"d_n");
startTimer(&timer);
//prescanArray( d_sum,d_n, resultBuf);
scanImpl(d_n, resultBuf, d_sum);
FROMGPU(h_n, (d_n+resultBuf-1), sizeof(int));
FROMGPU(h_sum, (d_sum+resultBuf-1), sizeof(int));
numResults=*h_n+*h_sum;
printf("numResults=%d, ", numResults);
endTimer("prescanArray", &timer);
Record *d_outBuf;
if(numResults>0)
{
GPUMALLOC((void**) &d_outBuf, sizeof(Record)*numResults );
*d_Rout=d_outBuf;
startTimer(&timer);
#ifdef SHARED_MEM
printf( "YES, SHARED MEMORY, joinMBWrite\n" );
#ifdef COALESCED
printf( "YES, COALESCED, joinMBWrite\n" );
hipLaunchKernelGGL(( joinMBWrite_kernel), dim3(grid_NLJ), dim3(threads_NLJ) , 0, 0, d_R, rLen, d_S, sLen,
d_quanLocS, numQuan,d_sum, d_outBuf);
#else
printf( "NO COALESCED, joinMBWrite\n" );
hipLaunchKernelGGL(( joinMBWrite_noCoalesced_kernel), dim3(grid_NLJ), dim3(threads_NLJ) , 0, 0, d_R, rLen, d_S, sLen,
d_quanLocS, numQuan,d_sum, d_outBuf);
#endif
#else
printf( "NO SHARED MEMORY, joinMBWrite\n" );
Record* d_tempBuf_R;
GPUMALLOC( (void**)&d_tempBuf_R, sizeof(Record)*SMJ_NUM_THREADS_PER_BLOCK*grid_NLJ.x );
hipLaunchKernelGGL(( joinMBWrite_kernel_noShared), dim3(grid_NLJ), dim3(threads_NLJ) , 0, 0, d_tempBuf_R, d_R, rLen, d_S, sLen,
d_quanLocS, numQuan,d_sum, d_outBuf);
GPUFREE( d_tempBuf_R );
#endif
CUDA_SAFE_CALL(hipDeviceSynchronize());
endTimer("joinMBWrite_kernel", &timer);
}
GPUFREE(d_n);
GPUFREE(d_sum);
CPUFREE(h_n);
CPUFREE(h_sum);
return numResults;
}
#endif
| 232f006fe95c4ab21d41f7727077142c29bfebb8.cu | #ifndef JOIN_MATCHING_BLOCKS_CU
#define JOIN_MATCHING_BLOCKS_CU
#define NUM_DELTA_PER_BLOCK 8
#define SMJ_NUM_THREADS_PER_BLOCK 512
#ifdef BINARY_SEARCH
__device__
int findNumResultInChunk(Record records[], int key)
{
int min = 0;
int max = SMJ_NUM_THREADS_PER_BLOCK;
int mid;
int cut;
while(max - min > 1) {
mid = (min + max) / 2;
cut = records[mid].y;
if(key > cut)
min = mid;
else
max = mid;
}
if(records[min].y >= key)
return min;
else
return max;
}
#else
__device__
int findNumResultInChunk(Record records[], int key)
{
int numResult=0;
for(int i=0;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
if(records[i].y==key)
numResult++;
else
if(records[i].y>key)
break;
return numResult;
}
#endif
//the best, with shared memory, with coalesced access
__global__ void
joinMBCount_kernel(Record *d_R, int rLen, Record* d_S, int sLen,
int *d_quanLocS, int numQuan, int *d_n)
{
__shared__ Record tempBuf_R[SMJ_NUM_THREADS_PER_BLOCK];
__shared__ int sStart;
__shared__ int sEnd;
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int numResult=0;
if(bid<numQuan)
{
if(resultID<rLen)
tempBuf_R[tid]=d_R[resultID];
else
tempBuf_R[tid].y=TEST_MAX;
if(tid==0)
{
sStart=d_quanLocS[bid<<1];
sEnd=d_quanLocS[(bid<<1)+1];
}
__syncthreads();
int pos=0;
Record tempValue;
int i=0;
int startPos=0;
for(pos=sStart;(pos+tid)<sEnd;pos+=SMJ_NUM_THREADS_PER_BLOCK)
{
tempValue=d_S[pos+tid];
//for(i=0;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
// if(tempValue.y==tempBuf_R[i].y)
// numResult++;
//numResult+=findNumResultInChunk_seq(tempBuf_R,tempValue.y);
startPos=findNumResultInChunk(tempBuf_R,tempValue.y);
for(i=startPos;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
if(tempBuf_R[i].y==tempValue.y)
numResult++;
else
if(tempBuf_R[i].y>tempValue.y)
break;
}
}
else
numResult=0;
d_n[resultID]=numResult;
}
#ifndef SHARED_MEM
__global__ void joinMBCount_kernel_noShared(Record* d_tempBuf_R, Record *d_R, int rLen, Record* d_S, int sLen,
int *d_quanLocS, int numQuan, int *d_n)
{
//__shared__ Record tempBuf_R[SMJ_NUM_THREADS_PER_BLOCK];
Record* tempBuf_R;
tempBuf_R = d_tempBuf_R + blockIdx.x*SMJ_NUM_THREADS_PER_BLOCK;
__shared__ int sStart;
__shared__ int sEnd;
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int numResult=0;
if(bid<numQuan)
{
if(resultID<rLen)
tempBuf_R[tid]=d_R[resultID];
else
tempBuf_R[tid].y=TEST_MAX;
if(tid==0)
{
sStart=d_quanLocS[bid<<1];
sEnd=d_quanLocS[(bid<<1)+1];
}
__syncthreads();
int pos=0;
Record tempValue;
int i=0;
int startPos=0;
for(pos=sStart;(pos+tid)<sEnd;pos+=SMJ_NUM_THREADS_PER_BLOCK)
{
tempValue=d_S[pos+tid];
//for(i=0;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
// if(tempValue.y==tempBuf_R[i].y)
// numResult++;
//numResult+=findNumResultInChunk_seq(tempBuf_R,tempValue.y);
startPos=findNumResultInChunk(tempBuf_R,tempValue.y);
for(i=startPos;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
if(tempBuf_R[i].y==tempValue.y)
numResult++;
else
if(tempBuf_R[i].y>tempValue.y)
break;
}
}
else
numResult=0;
d_n[resultID]=numResult;
}
#endif
#ifndef COALESCED
__global__ void
joinMBCount_noCoalesced_kernel(Record *d_R, int rLen, Record* d_S, int sLen,
int *d_quanLocS, int numQuan, int *d_n)
{
__shared__ Record tempBuf_R[SMJ_NUM_THREADS_PER_BLOCK];
__shared__ int sStart;
__shared__ int sEnd;
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
int numResult=0;
if(bid<numQuan)
{
if(resultID<rLen)
tempBuf_R[tid]=d_R[resultID];
else
tempBuf_R[tid].y=TEST_MAX;
if(tid==0)
{
sStart=d_quanLocS[bid<<1];
sEnd=d_quanLocS[(bid<<1)+1];
}
__syncthreads();
int pos=0;
Record tempValue;
int i=0;
int startPos=0;
int len = (sEnd - sStart)/numThread;
int start = sStart + len*tid;
int end = start + len;
if( tid == (numThread - 1) )
{
end = sEnd;
}
//for(pos=sStart + tid; pos < sEnd;pos+=SMJ_NUM_THREADS_PER_BLOCK)
for(pos=start; pos < end;pos++)
{
tempValue=d_S[pos];
startPos=findNumResultInChunk(tempBuf_R,tempValue.y);
for(i=startPos;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
if(tempBuf_R[i].y==tempValue.y)
numResult++;
else
if(tempBuf_R[i].y>tempValue.y)
break;
}
}
else
numResult=0;
d_n[resultID]=numResult;
}
#endif
//best, with shared memory, with coalesced
__global__ void
joinMBWrite_kernel(Record *d_R, int rLen, Record* d_S, int sLen,
int *d_quanLocS, int numQuan, int *d_sum, Record *d_output)
{
__shared__ Record tempBuf_R[SMJ_NUM_THREADS_PER_BLOCK];
__shared__ int sStart;
__shared__ int sEnd;
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
//int numResult=0;
if(bid<numQuan)
{
if(resultID<rLen)
tempBuf_R[tid]=d_R[resultID];
else
tempBuf_R[tid].y=TEST_MAX;
if(tid==0)
{
sStart=d_quanLocS[bid<<1];
sEnd=d_quanLocS[(bid<<1)+1];
}
__syncthreads();
int pos=0;
Record tempValue;
int i=0;
int startPos=0;
int base=d_sum[resultID];
for(pos=sStart;(pos+tid)<sEnd;pos+=SMJ_NUM_THREADS_PER_BLOCK)
{
tempValue=d_S[pos+tid];
//for(i=0;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
// if(tempValue.y==tempBuf_R[i].y)
// numResult++;
//numResult+=findNumResultInChunk_seq(tempBuf_R,tempValue.y);
startPos=findNumResultInChunk(tempBuf_R,tempValue.y);
for(i=startPos;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
if(tempBuf_R[i].y==tempValue.y)
{
d_output[base].x=tempBuf_R[i].x;
d_output[base].y=tempValue.x;
base++;
}
else
if(tempBuf_R[i].y>tempValue.y)
break;
}
}
}
#ifndef COALESCED
__global__ void
joinMBWrite_noCoalesced_kernel(Record *d_R, int rLen, Record* d_S, int sLen,
int *d_quanLocS, int numQuan, int *d_sum, Record *d_output)
{
__shared__ Record tempBuf_R[SMJ_NUM_THREADS_PER_BLOCK];
__shared__ int sStart;
__shared__ int sEnd;
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
//int numResult=0;
if(bid<numQuan)
{
if(resultID<rLen)
tempBuf_R[tid]=d_R[resultID];
else
tempBuf_R[tid].y=TEST_MAX;
if(tid==0)
{
sStart=d_quanLocS[bid<<1];
sEnd=d_quanLocS[(bid<<1)+1];
}
__syncthreads();
int pos=0;
Record tempValue;
int i=0;
int startPos=0;
int base=d_sum[resultID];
int len = (sEnd - sStart)/numThread;
int start = sStart + len*tid;
int end = start + len;
if( tid == (numThread - 1) )
{
end = sEnd;
}
for(pos = start; pos < end; pos++ )
{
tempValue=d_S[pos];
startPos=findNumResultInChunk(tempBuf_R,tempValue.y);
for(i=startPos;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
if(tempBuf_R[i].y==tempValue.y)
{
d_output[base].x=tempBuf_R[i].x;
d_output[base].y=tempValue.x;
base++;
}
else
if(tempBuf_R[i].y>tempValue.y)
break;
}
}
}
#endif
#ifndef SHARED_MEM
__global__ void
joinMBWrite_kernel_noShared(Record* d_tempBuf_R, Record *d_R, int rLen, Record* d_S, int sLen,
int *d_quanLocS, int numQuan, int *d_sum, Record *d_output)
{
//__shared__ Record tempBuf_R[SMJ_NUM_THREADS_PER_BLOCK];
Record* tempBuf_R;
tempBuf_R = d_tempBuf_R + blockIdx.x*SMJ_NUM_THREADS_PER_BLOCK;
__shared__ int sStart;
__shared__ int sEnd;
const int by = blockIdx.y;
const int bx = blockIdx.x;
const int tx = threadIdx.x;
const int ty = threadIdx.y;
const int tid=tx+ty*blockDim.x;
const int bid=bx+by*gridDim.x;
const int numThread=blockDim.x;
const int resultID=(bid)*numThread+tid;
//int numResult=0;
if(bid<numQuan)
{
if(resultID<rLen)
tempBuf_R[tid]=d_R[resultID];
else
tempBuf_R[tid].y=TEST_MAX;
if(tid==0)
{
sStart=d_quanLocS[bid<<1];
sEnd=d_quanLocS[(bid<<1)+1];
}
__syncthreads();
int pos=0;
Record tempValue;
int i=0;
int startPos=0;
int base=d_sum[resultID];
for(pos=sStart;(pos+tid)<sEnd;pos+=SMJ_NUM_THREADS_PER_BLOCK)
{
tempValue=d_S[pos+tid];
//for(i=0;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
// if(tempValue.y==tempBuf_R[i].y)
// numResult++;
//numResult+=findNumResultInChunk_seq(tempBuf_R,tempValue.y);
startPos=findNumResultInChunk(tempBuf_R,tempValue.y);
for(i=startPos;i<SMJ_NUM_THREADS_PER_BLOCK;i++)
if(tempBuf_R[i].y==tempValue.y)
{
d_output[base].x=tempBuf_R[i].x;
d_output[base].y=tempValue.x;
base++;
}
else
if(tempBuf_R[i].y>tempValue.y)
break;
}
}
}
#endif
int joinMatchingBlocks(Record *d_R, int rLen, Record *d_S, int sLen,
int *d_quanLocS, int numQuan, Record** d_Rout)
{
int numResults=0;
int numThreadPerBlock =SMJ_NUM_THREADS_PER_BLOCK;
int numBlock_X=numQuan;
int numBlock_Y=1;
if(numBlock_X>NLJ_MAX_NUM_BLOCK_PER_DIM)
{
numBlock_Y=numBlock_X/NLJ_MAX_NUM_BLOCK_PER_DIM;
if(numBlock_X%NLJ_MAX_NUM_BLOCK_PER_DIM!=0)
numBlock_Y++;
numBlock_X=NLJ_MAX_NUM_BLOCK_PER_DIM;
}
dim3 threads_NLJ( numThreadPerBlock, 1, 1);
dim3 grid_NLJ( numBlock_X, numBlock_Y, 1);
int resultBuf=grid_NLJ.x*grid_NLJ.y*numThreadPerBlock;
printf("numThreadPerBlock,%d, numBlock_X, %d, numBlock_Y, %d\n", numThreadPerBlock, numBlock_X,numBlock_Y);
int* d_n;
GPUMALLOC((void**)&d_n, sizeof(int)*resultBuf );
//the prefix sum for d_n
int *d_sum;//the prefix sum for d_n[1,...,n]
GPUMALLOC((void**)&d_sum, sizeof(int)*resultBuf );
int* h_n ;
CPUMALLOC((void**)&h_n, sizeof(int));
int* h_sum ;
CPUMALLOC((void**)&h_sum, sizeof(int));
unsigned int timer=0;
//saven_initialPrefixSum(resultBuf);
startTimer(&timer);
#ifdef SHARED_MEM
printf( "YES, SHARED MEMORY, joinMBCount \n" );
#ifdef COALESCED
printf( "YES, COALESCED, joinMBCount\n" );
joinMBCount_kernel<<< grid_NLJ, threads_NLJ >>>(d_R, rLen, d_S, sLen,
d_quanLocS, numQuan, d_n);
#else
printf( "NO COALESCED, joinMBCount\n" );
joinMBCount_noCoalesced_kernel<<< grid_NLJ, threads_NLJ >>>(d_R, rLen, d_S, sLen,
d_quanLocS, numQuan, d_n);
#endif
#else
printf( "NO SHARED MEMORY, jonMBCount \n" );
Record* d_tempBuf_R;
GPUMALLOC( (void**)&d_tempBuf_R, sizeof(Record)*SMJ_NUM_THREADS_PER_BLOCK*grid_NLJ.x );
joinMBCount_kernel_noShared<<< grid_NLJ, threads_NLJ >>>(d_tempBuf_R, d_R, rLen, d_S, sLen,
d_quanLocS, numQuan, d_n);
GPUFREE( d_tempBuf_R );
#endif
CUDA_SAFE_CALL(cudaThreadSynchronize());
endTimer("joinMBCount_kernel", &timer);
//gpuPrint(d_n,512,"d_n");
startTimer(&timer);
//prescanArray( d_sum,d_n, resultBuf);
scanImpl(d_n, resultBuf, d_sum);
FROMGPU(h_n, (d_n+resultBuf-1), sizeof(int));
FROMGPU(h_sum, (d_sum+resultBuf-1), sizeof(int));
numResults=*h_n+*h_sum;
printf("numResults=%d, ", numResults);
endTimer("prescanArray", &timer);
Record *d_outBuf;
if(numResults>0)
{
GPUMALLOC((void**) &d_outBuf, sizeof(Record)*numResults );
*d_Rout=d_outBuf;
startTimer(&timer);
#ifdef SHARED_MEM
printf( "YES, SHARED MEMORY, joinMBWrite\n" );
#ifdef COALESCED
printf( "YES, COALESCED, joinMBWrite\n" );
joinMBWrite_kernel<<< grid_NLJ, threads_NLJ >>>(d_R, rLen, d_S, sLen,
d_quanLocS, numQuan,d_sum, d_outBuf);
#else
printf( "NO COALESCED, joinMBWrite\n" );
joinMBWrite_noCoalesced_kernel<<< grid_NLJ, threads_NLJ >>>(d_R, rLen, d_S, sLen,
d_quanLocS, numQuan,d_sum, d_outBuf);
#endif
#else
printf( "NO SHARED MEMORY, joinMBWrite\n" );
Record* d_tempBuf_R;
GPUMALLOC( (void**)&d_tempBuf_R, sizeof(Record)*SMJ_NUM_THREADS_PER_BLOCK*grid_NLJ.x );
joinMBWrite_kernel_noShared<<< grid_NLJ, threads_NLJ >>>(d_tempBuf_R, d_R, rLen, d_S, sLen,
d_quanLocS, numQuan,d_sum, d_outBuf);
GPUFREE( d_tempBuf_R );
#endif
CUDA_SAFE_CALL(cudaThreadSynchronize());
endTimer("joinMBWrite_kernel", &timer);
}
GPUFREE(d_n);
GPUFREE(d_sum);
CPUFREE(h_n);
CPUFREE(h_sum);
return numResults;
}
#endif
|
5e87e118f7593cead874a8340e15df456d7487cf.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/hip/HIPBlas.h>
namespace at { namespace native {
Tensor &addmv_impl_cuda(Tensor& result, const Tensor &self, const Tensor &mat, const Tensor &vec, Scalar beta_, Scalar alpha_) {
auto r_stride = result.stride(0);
auto vec_stride = vec.stride(0);
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, mat.scalar_type(), "addmv_impl_cuda", [&] {
auto beta = beta_.to<scalar_t>();
auto alpha = alpha_.to<scalar_t>();
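    // Dispatch on the matrix layout: column-major (stride(0) == 1) uses a
    // non-transposed gemv, row-major (stride(1) == 1) a transposed gemv, and any
    // other layout is handled by first making a contiguous copy.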
if (mat.stride(0) == 1) {
at::cuda::blas::gemv<scalar_t>('n',
mat.size(0), mat.size(1), alpha, mat.data_ptr<scalar_t>(), mat.stride(1), vec.data_ptr<scalar_t>(),
vec_stride, beta, result.data_ptr<scalar_t>(), r_stride);
}
else if (mat.stride(1) == 1) {
at::cuda::blas::gemv<scalar_t>('t',
mat.size(1), mat.size(0), alpha, mat.data_ptr<scalar_t>(), mat.stride(0),
vec.data_ptr<scalar_t>(), vec_stride, beta, result.data_ptr<scalar_t>(), r_stride);
}
else {
Tensor cmat = mat.contiguous();
at::cuda::blas::gemv<scalar_t>('t',
mat.size(1), mat.size(0), alpha, cmat.data_ptr<scalar_t>(), cmat.stride(0),
vec.data_ptr<scalar_t>(), vec.stride(0), beta, result.data_ptr<scalar_t>(), r_stride);
}
// In hipblasSgemv, hipblasDgemv (x,0).mv(0) does not
// handle beta, whereas hipblasSgemm, hipblasDgemm do for case where (x,0).mm(0,y).
// This logic could live in blas::gemv<float> and <double> if blas::gemv's interface
// can be extended to accept result as an argument.
if (std::is_same<scalar_t, float>::value || std::is_same<scalar_t, double>::value) {
if (vec.size(0) == 0 && mat.size(0) != 0) {
if (beta == scalar_t(0)) {
result.zero_();
} else if (beta != scalar_t(1)) {
result.mul_(beta);
}
}
}
});
return result;
}
}} // namespace at::native
| 5e87e118f7593cead874a8340e15df456d7487cf.cu | #include <ATen/ATen.h>
#include <ATen/Dispatch.h>
#include <ATen/cuda/CUDABlas.h>
namespace at { namespace native {
Tensor &addmv_impl_cuda(Tensor& result, const Tensor &self, const Tensor &mat, const Tensor &vec, Scalar beta_, Scalar alpha_) {
auto r_stride = result.stride(0);
auto vec_stride = vec.stride(0);
AT_DISPATCH_FLOATING_TYPES_AND2(at::ScalarType::Half, at::ScalarType::BFloat16, mat.scalar_type(), "addmv_impl_cuda", [&] {
auto beta = beta_.to<scalar_t>();
auto alpha = alpha_.to<scalar_t>();
if (mat.stride(0) == 1) {
at::cuda::blas::gemv<scalar_t>('n',
mat.size(0), mat.size(1), alpha, mat.data_ptr<scalar_t>(), mat.stride(1), vec.data_ptr<scalar_t>(),
vec_stride, beta, result.data_ptr<scalar_t>(), r_stride);
}
else if (mat.stride(1) == 1) {
at::cuda::blas::gemv<scalar_t>('t',
mat.size(1), mat.size(0), alpha, mat.data_ptr<scalar_t>(), mat.stride(0),
vec.data_ptr<scalar_t>(), vec_stride, beta, result.data_ptr<scalar_t>(), r_stride);
}
else {
Tensor cmat = mat.contiguous();
at::cuda::blas::gemv<scalar_t>('t',
mat.size(1), mat.size(0), alpha, cmat.data_ptr<scalar_t>(), cmat.stride(0),
vec.data_ptr<scalar_t>(), vec.stride(0), beta, result.data_ptr<scalar_t>(), r_stride);
}
// In cublasSgemv, cublasDgemv (x,0).mv(0) does not
// handle beta, whereas cublasSgemm, cublasDgemm do for case where (x,0).mm(0,y).
// This logic could live in blas::gemv<float> and <double> if blas::gemv's interface
// can be extended to accept result as an argument.
if (std::is_same<scalar_t, float>::value || std::is_same<scalar_t, double>::value) {
if (vec.size(0) == 0 && mat.size(0) != 0) {
if (beta == scalar_t(0)) {
result.zero_();
} else if (beta != scalar_t(1)) {
result.mul_(beta);
}
}
}
});
return result;
}
}} // namespace at::native
|
15170b286a15304657c34cee136fa48bae538fd4.hip | // !!! This is a file automatically generated by hipify!!!
#include "kernel_common.cuh"
namespace VLR {
RT_FUNCTION DirectionType sideTest(const Normal3D &ng, const Vector3D &d0, const Vector3D &d1) {
bool reflect = dot(Vector3D(ng), d0) * dot(Vector3D(ng), d1) > 0;
return DirectionType::AllFreq() | (reflect ? DirectionType::Reflection() : DirectionType::Transmission());
}
class FresnelConductor {
SampledSpectrum m_eta;
SampledSpectrum m_k;
public:
RT_FUNCTION FresnelConductor(const SampledSpectrum &eta, const SampledSpectrum &k) : m_eta(eta), m_k(k) {}
RT_FUNCTION SampledSpectrum evaluate(float cosEnter) const {
cosEnter = ::fabs(cosEnter);
float cosEnter2 = cosEnter * cosEnter;
SampledSpectrum _2EtaCosEnter = 2.0f * m_eta * cosEnter;
SampledSpectrum tmp_f = m_eta * m_eta + m_k * m_k;
SampledSpectrum tmp = tmp_f * cosEnter2;
SampledSpectrum Rparl2 = (tmp - _2EtaCosEnter + 1) / (tmp + _2EtaCosEnter + 1);
SampledSpectrum Rperp2 = (tmp_f - _2EtaCosEnter + cosEnter2) / (tmp_f + _2EtaCosEnter + cosEnter2);
return (Rparl2 + Rperp2) / 2.0f;
}
RT_FUNCTION float evaluate(float cosEnter, uint32_t wlIdx) const {
cosEnter = ::fabs(cosEnter);
float cosEnter2 = cosEnter * cosEnter;
float _2EtaCosEnter = 2.0f * m_eta[wlIdx] * cosEnter;
float tmp_f = m_eta[wlIdx] * m_eta[wlIdx] + m_k[wlIdx] * m_k[wlIdx];
float tmp = tmp_f * cosEnter2;
float Rparl2 = (tmp - _2EtaCosEnter + 1) / (tmp + _2EtaCosEnter + 1);
float Rperp2 = (tmp_f - _2EtaCosEnter + cosEnter2) / (tmp_f + _2EtaCosEnter + cosEnter2);
return (Rparl2 + Rperp2) / 2.0f;
}
};
class FresnelDielectric {
SampledSpectrum m_etaExt;
SampledSpectrum m_etaInt;
public:
RT_FUNCTION FresnelDielectric(const SampledSpectrum &etaExt, const SampledSpectrum &etaInt) : m_etaExt(etaExt), m_etaInt(etaInt) {}
RT_FUNCTION SampledSpectrum etaExt() const { return m_etaExt; }
RT_FUNCTION SampledSpectrum etaInt() const { return m_etaInt; }
RT_FUNCTION SampledSpectrum evaluate(float cosEnter) const {
cosEnter = clamp(cosEnter, -1.0f, 1.0f);
bool entering = cosEnter > 0.0f;
const SampledSpectrum &eEnter = entering ? m_etaExt : m_etaInt;
const SampledSpectrum &eExit = entering ? m_etaInt : m_etaExt;
SampledSpectrum sinExit = eEnter / eExit * std::sqrt(std::fmax(0.0f, 1.0f - cosEnter * cosEnter));
SampledSpectrum ret = SampledSpectrum::Zero();
cosEnter = ::fabs(cosEnter);
for (int i = 0; i < SampledSpectrum::NumComponents(); ++i) {
if (sinExit[i] >= 1.0f) {
ret[i] = 1.0f;
}
else {
float cosExit = std::sqrt(std::fmax(0.0f, 1.0f - sinExit[i] * sinExit[i]));
ret[i] = evalF(eEnter[i], eExit[i], cosEnter, cosExit);
}
}
return ret;
}
RT_FUNCTION float evaluate(float cosEnter, uint32_t wlIdx) const {
cosEnter = clamp(cosEnter, -1.0f, 1.0f);
bool entering = cosEnter > 0.0f;
const float &eEnter = entering ? m_etaExt[wlIdx] : m_etaInt[wlIdx];
const float &eExit = entering ? m_etaInt[wlIdx] : m_etaExt[wlIdx];
float sinExit = eEnter / eExit * std::sqrt(std::fmax(0.0f, 1.0f - cosEnter * cosEnter));
cosEnter = ::fabs(cosEnter);
if (sinExit >= 1.0f) {
return 1.0f;
}
else {
float cosExit = std::sqrt(std::fmax(0.0f, 1.0f - sinExit * sinExit));
return evalF(eEnter, eExit, cosEnter, cosExit);
}
}
RT_FUNCTION static float evalF(float etaEnter, float etaExit, float cosEnter, float cosExit);
};
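    // Unpolarized Fresnel reflectance at a dielectric interface: the average of the
    // squared parallel- and perpendicular-polarized amplitude reflection coefficients.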
RT_FUNCTION float FresnelDielectric::evalF(float etaEnter, float etaExit, float cosEnter, float cosExit) {
float Rparl = ((etaExit * cosEnter) - (etaEnter * cosExit)) / ((etaExit * cosEnter) + (etaEnter * cosExit));
float Rperp = ((etaEnter * cosEnter) - (etaExit * cosExit)) / ((etaEnter * cosEnter) + (etaExit * cosExit));
return (Rparl * Rparl + Rperp * Rperp) / 2.0f;
}
class FresnelSchlick {
// assume vacuum-dielectric interface
float m_F0;
public:
RT_FUNCTION FresnelSchlick(float F0) : m_F0(F0) {}
RT_FUNCTION SampledSpectrum evaluate(float cosEnter) const {
bool entering = cosEnter >= 0;
float cosEval = cosEnter;
if (!entering) {
float sqrtF0 = std::sqrt(m_F0);
float etaExit = (1 + sqrtF0) / (1 - sqrtF0);
float invRelIOR = 1.0f / etaExit;
float sinExit2 = invRelIOR * invRelIOR * std::fmax(0.0f, 1.0f - cosEnter * cosEnter);
if (sinExit2 > 1.0f) {
return SampledSpectrum::One();
}
cosEval = std::sqrt(1 - sinExit2);
}
return SampledSpectrum(m_F0 + (1.0f - m_F0) * pow5(1 - cosEval));
}
};
class GGXMicrofacetDistribution {
float m_alpha_gx;
float m_alpha_gy;
float m_cosRt;
float m_sinRt;
public:
RT_FUNCTION GGXMicrofacetDistribution(float alpha_gx, float alpha_gy, float rotation) :
m_alpha_gx(alpha_gx), m_alpha_gy(alpha_gy) {
VLR::sincos(rotation, &m_sinRt, &m_cosRt);
}
RT_FUNCTION float evaluate(const Normal3D &m) {
Normal3D mr = Normal3D(m_cosRt * m.x + m_sinRt * m.y,
-m_sinRt * m.x + m_cosRt * m.y,
m.z);
if (mr.z <= 0)
return 0.0f;
float temp = pow2(mr.x / m_alpha_gx) + pow2(mr.y / m_alpha_gy) + pow2(mr.z);
return 1.0f / (M_PIf * m_alpha_gx * m_alpha_gy * pow2(temp));
}
RT_FUNCTION float evaluateSmithG1(const Vector3D &v, const Normal3D &m) {
Vector3D vr = Vector3D(m_cosRt * v.x + m_sinRt * v.y,
-m_sinRt * v.x + m_cosRt * v.y,
v.z);
float alpha_g2_tanTheta2 = (pow2(vr.x * m_alpha_gx) + pow2(vr.y * m_alpha_gy)) / pow2(vr.z);
float Lambda = (-1 + std::sqrt(1 + alpha_g2_tanTheta2)) / 2;
float chi = (dot(v, m) / v.z) > 0 ? 1 : 0;
return chi / (1 + Lambda);
}
RT_FUNCTION float evaluateHeightCorrelatedSmithG(const Vector3D &v1, const Vector3D &v2, const Normal3D &m) {
Vector3D v1r = Vector3D(m_cosRt * v1.x + m_sinRt * v1.y,
-m_sinRt * v1.x + m_cosRt * v1.y,
v1.z);
Vector3D v2r = Vector3D(m_cosRt * v2.x + m_sinRt * v2.y,
-m_sinRt * v2.x + m_cosRt * v2.y,
v2.z);
float alpha_g2_tanTheta2_1 = (pow2(v1r.x * m_alpha_gx) + pow2(v1r.y * m_alpha_gy)) / pow2(v1r.z);
float alpha_g2_tanTheta2_2 = (pow2(v2r.x * m_alpha_gx) + pow2(v2r.y * m_alpha_gy)) / pow2(v2r.z);
float Lambda1 = (-1 + std::sqrt(1 + alpha_g2_tanTheta2_1)) / 2;
float Lambda2 = (-1 + std::sqrt(1 + alpha_g2_tanTheta2_2)) / 2;
float chi1 = (dot(v1, m) / v1.z) > 0 ? 1 : 0;
float chi2 = (dot(v2, m) / v2.z) > 0 ? 1 : 0;
return chi1 * chi2 / (1 + Lambda1 + Lambda2);
}
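        // Sample a microfacet normal from the distribution of visible normals:
        // stretch the view vector into the isotropic configuration, sample the
        // projected area, then unstretch; normalPDF is the resulting normal PDF,
        // G1(v, m) * |dot(v, m)| * D(m) / |v.z|.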
RT_FUNCTION float sample(const Vector3D &v, float u0, float u1, Normal3D* m, float* normalPDF) {
Vector3D vr = Vector3D(m_cosRt * v.x + m_sinRt * v.y,
-m_sinRt * v.x + m_cosRt * v.y,
v.z);
// stretch view
Vector3D sv = normalize(Vector3D(m_alpha_gx * vr.x, m_alpha_gy * vr.y, vr.z));
// orthonormal basis
// Vector3D T1 = (sv.z < 0.9999f) ? normalize(cross(sv, Vector3D::Ez)) : Vector3D::Ex;
// Vector3D T2 = cross(T1, sv);
float distIn2D = std::sqrt(sv.x * sv.x + sv.y * sv.y);
float recDistIn2D = 1.0f / distIn2D;
Vector3D T1 = (sv.z < 0.9999f) ? Vector3D(sv.y * recDistIn2D, -sv.x * recDistIn2D, 0) : Vector3D::Ex();
Vector3D T2 = Vector3D(T1.y * sv.z, -T1.x * sv.z, distIn2D);
// sample point with polar coordinates (r, phi)
float a = 1.0f / (1.0f + sv.z);
float r = std::sqrt(u0);
float phi = M_PIf * ((u1 < a) ? u1 / a : 1 + (u1 - a) / (1.0f - a));
float sinPhi, cosPhi;
VLR::sincos(phi, &sinPhi, &cosPhi);
float P1 = r * cosPhi;
float P2 = r * sinPhi * ((u1 < a) ? 1.0f : sv.z);
// compute normal
Normal3D mr = P1 * T1 + P2 * T2 + std::sqrt(1.0f - P1 * P1 - P2 * P2) * sv;
// unstretch
mr = normalize(Normal3D(m_alpha_gx * mr.x, m_alpha_gy * mr.y, mr.z));
float D = evaluate(mr);
*normalPDF = evaluateSmithG1(vr, mr) * absDot(vr, mr) * D / ::fabs(vr.z);
*m = Normal3D(m_cosRt * mr.x - m_sinRt * mr.y,
m_sinRt * mr.x + m_cosRt * mr.y,
mr.z);
return D;
}
RT_FUNCTION float evaluatePDF(const Vector3D &v, const Normal3D &m) {
return evaluateSmithG1(v, m) * absDot(v, m) * evaluate(m) / ::fabs(v.z);
}
};
// ----------------------------------------------------------------
// NullBSDF
RT_CALLABLE_PROGRAM uint32_t NullBSDF_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
return 0;
}
RT_CALLABLE_PROGRAM SampledSpectrum NullBSDF_getBaseColor(const uint32_t* params) {
return SampledSpectrum::Zero();
}
RT_CALLABLE_PROGRAM bool NullBSDF_matches(const uint32_t* params, DirectionType flags) {
return false;
}
RT_CALLABLE_PROGRAM SampledSpectrum NullBSDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
return SampledSpectrum::Zero();
}
RT_CALLABLE_PROGRAM SampledSpectrum NullBSDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
return SampledSpectrum::Zero();
}
RT_CALLABLE_PROGRAM float NullBSDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
return 0.0f;
}
RT_CALLABLE_PROGRAM float NullBSDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
return 0.0f;
}
// END: NullBSDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// MatteBRDF
struct MatteBRDF {
SampledSpectrum albedo;
float roughness;
};
RT_CALLABLE_PROGRAM uint32_t MatteSurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
MatteBRDF &p = *(MatteBRDF*)params;
auto &mat = *(const MatteSurfaceMaterial*)matDesc;
p.albedo = calcNode(mat.nodeAlbedo, mat.immAlbedo, surfPt, wls);
p.roughness = 0.0f;
return sizeof(MatteBRDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum MatteBRDF_getBaseColor(const uint32_t* params) {
auto &p = *(const MatteBRDF*)params;
return p.albedo;
}
RT_CALLABLE_PROGRAM bool MatteBRDF_matches(const uint32_t* params, DirectionType flags) {
DirectionType m_type = DirectionType::Reflection() | DirectionType::LowFreq();
return m_type.matches(flags);
}
RT_CALLABLE_PROGRAM SampledSpectrum MatteBRDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const MatteBRDF*)params;
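        // Cosine-weighted hemisphere sampling for the Lambertian lobe:
        // PDF = cos(theta) / pi, and the BRDF value is albedo / pi.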
result->dirLocal = cosineSampleHemisphere(uDir[0], uDir[1]);
result->dirPDF = result->dirLocal.z / M_PIf;
result->sampledType = DirectionType::Reflection() | DirectionType::LowFreq();
result->dirLocal.z *= query.dirLocal.z >= 0 ? 1 : -1;
return p.albedo / M_PIf;
}
RT_CALLABLE_PROGRAM SampledSpectrum MatteBRDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const MatteBRDF*)params;
if (query.dirLocal.z * dirLocal.z <= 0.0f) {
SampledSpectrum fs = SampledSpectrum::Zero();
return fs;
}
SampledSpectrum fs = p.albedo / M_PIf;
return fs;
}
RT_CALLABLE_PROGRAM float MatteBRDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
if (query.dirLocal.z * dirLocal.z <= 0.0f) {
return 0.0f;
}
return ::fabs(dirLocal.z) / M_PIf;
}
RT_CALLABLE_PROGRAM float MatteBRDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const MatteBRDF*)params;
return p.albedo.importance(query.wlHint);
}
// END: MatteBRDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// SpecularBRDF
struct SpecularBRDF {
SampledSpectrum coeffR;
SampledSpectrum eta;
SampledSpectrum k;
};
RT_CALLABLE_PROGRAM uint32_t SpecularReflectionSurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(SpecularBRDF*)params;
auto &mat = *(const SpecularReflectionSurfaceMaterial*)matDesc;
p.coeffR = calcNode(mat.nodeCoeffR, mat.immCoeffR, surfPt, wls);
p.eta = calcNode(mat.nodeEta, mat.immEta, surfPt, wls);
p.k = calcNode(mat.node_k, mat.imm_k, surfPt, wls);
return sizeof(SpecularBRDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum SpecularBRDF_getBaseColor(const uint32_t* params) {
auto &p = *(const SpecularBRDF*)params;
return p.coeffR;
}
RT_CALLABLE_PROGRAM bool SpecularBRDF_matches(const uint32_t* params, DirectionType flags) {
DirectionType m_type = DirectionType::Reflection() | DirectionType::Delta0D();
return m_type.matches(flags);
}
RT_CALLABLE_PROGRAM SampledSpectrum SpecularBRDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const SpecularBRDF*)params;
FresnelConductor fresnel(p.eta, p.k);
result->dirLocal = Vector3D(-query.dirLocal.x, -query.dirLocal.y, query.dirLocal.z);
result->dirPDF = 1.0f;
result->sampledType = DirectionType::Reflection() | DirectionType::Delta0D();
SampledSpectrum fs = p.coeffR * fresnel.evaluate(query.dirLocal.z) / ::fabs(query.dirLocal.z);
return fs;
}
RT_CALLABLE_PROGRAM SampledSpectrum SpecularBRDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
return SampledSpectrum::Zero();
}
RT_CALLABLE_PROGRAM float SpecularBRDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
return 0.0f;
}
RT_CALLABLE_PROGRAM float SpecularBRDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const SpecularBRDF*)params;
FresnelDielectric fresnel(p.eta, p.k);
return (p.coeffR * fresnel.evaluate(query.dirLocal.z)).importance(query.wlHint);
}
// END: SpecularBRDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// SpecularBSDF
struct SpecularBSDF {
SampledSpectrum coeff;
SampledSpectrum etaExt;
SampledSpectrum etaInt;
bool dispersive;
};
RT_CALLABLE_PROGRAM uint32_t SpecularScatteringSurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(SpecularBSDF*)params;
auto &mat = *(const SpecularScatteringSurfaceMaterial*)matDesc;
p.coeff = calcNode(mat.nodeCoeff, mat.immCoeff, surfPt, wls);
p.etaExt = calcNode(mat.nodeEtaExt, mat.immEtaExt, surfPt, wls);
p.etaInt = calcNode(mat.nodeEtaInt, mat.immEtaInt, surfPt, wls);
p.dispersive = !wls.singleIsSelected();
return sizeof(SpecularBSDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum SpecularBSDF_getBaseColor(const uint32_t* params) {
auto &p = *(const SpecularBSDF*)params;
return p.coeff;
}
RT_CALLABLE_PROGRAM bool SpecularBSDF_matches(const uint32_t* params, DirectionType flags) {
DirectionType m_type = DirectionType::WholeSphere() | DirectionType::Delta0D();
return m_type.matches(flags);
}
RT_CALLABLE_PROGRAM SampledSpectrum SpecularBSDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const SpecularBSDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
const SampledSpectrum &eEnter = entering ? p.etaExt : p.etaInt;
const SampledSpectrum &eExit = entering ? p.etaInt : p.etaExt;
FresnelDielectric fresnel(eEnter, eExit);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
SampledSpectrum F = fresnel.evaluate(dirV.z);
float reflectProb = F.importance(query.wlHint);
if (query.dirTypeFilter.isReflection())
reflectProb = 1.0f;
if (query.dirTypeFilter.isTransmission())
reflectProb = 0.0f;
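        // Select reflection or transmission with probability equal to the Fresnel
        // reflectance importance, unless the query filter forces a single lobe.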
if (uComponent < reflectProb) {
if (dirV.z == 0.0f) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
Vector3D dirL = Vector3D(-dirV.x, -dirV.y, dirV.z);
result->dirLocal = entering ? dirL : -dirL;
result->dirPDF = reflectProb;
result->sampledType = DirectionType::Reflection() | DirectionType::Delta0D();
SampledSpectrum fs = p.coeff * F / ::fabs(dirV.z);
return fs;
}
else {
float sinEnter2 = 1.0f - dirV.z * dirV.z;
float recRelIOR = eEnter[query.wlHint] / eExit[query.wlHint];// reciprocal of relative IOR.
float sinExit2 = recRelIOR * recRelIOR * sinEnter2;
if (sinExit2 >= 1.0f) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
float cosExit = std::sqrt(std::fmax(0.0f, 1.0f - sinExit2));
Vector3D dirL = Vector3D(recRelIOR * -dirV.x, recRelIOR * -dirV.y, -cosExit);
result->dirLocal = entering ? dirL : -dirL;
result->dirPDF = 1.0f - reflectProb;
result->sampledType = DirectionType::Transmission() | DirectionType::Delta0D() | (p.dispersive ? DirectionType::Dispersive() : DirectionType());
SampledSpectrum ret = SampledSpectrum::Zero();
ret[query.wlHint] = p.coeff[query.wlHint] * (1.0f - F[query.wlHint]);
SampledSpectrum fs = ret / ::fabs(cosExit);
return fs;
}
}
RT_CALLABLE_PROGRAM SampledSpectrum SpecularBSDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
return SampledSpectrum::Zero();
}
RT_CALLABLE_PROGRAM float SpecularBSDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
return 0.0f;
}
RT_CALLABLE_PROGRAM float SpecularBSDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const SpecularBSDF*)params;
return p.coeff.importance(query.wlHint);
}
// END: SpecularBSDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// MicrofacetBRDF
struct MicrofacetBRDF {
SampledSpectrum eta;
SampledSpectrum k;
float alphaX;
float alphaY;
float rotation;
};
RT_CALLABLE_PROGRAM uint32_t MicrofacetReflectionSurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(MicrofacetBRDF*)params;
auto &mat = *(const MicrofacetReflectionSurfaceMaterial*)matDesc;
p.eta = calcNode(mat.nodeEta, mat.immEta, surfPt, wls);
p.k = calcNode(mat.node_k, mat.imm_k, surfPt, wls);
optix::float3 roughnessAnisotropyRotation = calcNode(mat.nodeRoughnessAnisotropyRotation,
optix::make_float3(mat.immRoughness, mat.immAnisotropy, mat.immRotation),
surfPt, wls);
float alpha = pow2(roughnessAnisotropyRotation.x);
float aspect = std::sqrt(1.0f - 0.9f * roughnessAnisotropyRotation.y);
p.alphaX = std::fmax(0.001f, alpha / aspect);
p.alphaY = std::fmax(0.001f, alpha * aspect);
p.rotation = 2 * M_PIf * roughnessAnisotropyRotation.z;
return sizeof(MicrofacetBRDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum MicrofacetBRDF_getBaseColor(const uint32_t* params) {
auto &p = *(const MicrofacetBRDF*)params;
FresnelConductor fresnel(p.eta, p.k);
return fresnel.evaluate(1.0f);
}
RT_CALLABLE_PROGRAM bool MicrofacetBRDF_matches(const uint32_t* params, DirectionType flags) {
DirectionType m_type = DirectionType::Reflection() | DirectionType::HighFreq();
return m_type.matches(flags);
}
RT_CALLABLE_PROGRAM SampledSpectrum MicrofacetBRDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const MicrofacetBRDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
FresnelConductor fresnel(p.eta, p.k);
GGXMicrofacetDistribution ggx(p.alphaX, p.alphaY, p.rotation);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
// EN: sample a half vector, then generate a resulting direction sample based on it.
Normal3D m;
float mPDF;
float D = ggx.sample(dirV, uDir[0], uDir[1], &m, &mPDF);
float dotHV = dot(dirV, m);
if (dotHV <= 0) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
Vector3D dirL = 2 * dotHV * m - dirV;
result->dirLocal = entering ? dirL : -dirL;
if (dirL.z * dirV.z <= 0) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
float commonPDFTerm = 1.0f / (4 * dotHV);
result->dirPDF = commonPDFTerm * mPDF;
result->sampledType = DirectionType::Reflection() | DirectionType::HighFreq();
SampledSpectrum F = fresnel.evaluate(dotHV);
float G = ggx.evaluateSmithG1(dirV, m) * ggx.evaluateSmithG1(dirL, m);
SampledSpectrum fs = F * D * G / (4 * dirV.z * dirL.z);
//VLRAssert(fs.allFinite(), "fs: %s, F: %s, G, %g, D: %g, wlIdx: %u, qDir: %s, rDir: %s",
// fs.toString().c_str(), F.toString().c_str(), G, D, query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
return fs;
}
RT_CALLABLE_PROGRAM SampledSpectrum MicrofacetBRDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const MicrofacetBRDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
FresnelConductor fresnel(p.eta, p.k);
GGXMicrofacetDistribution ggx(p.alphaX, p.alphaY, p.rotation);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = entering ? dirLocal : -dirLocal;
float dotNVdotNL = dirL.z * dirV.z;
if (dotNVdotNL <= 0)
return SampledSpectrum::Zero();
Normal3D m = halfVector(dirV, dirL);
float dotHV = dot(dirV, m);
float D = ggx.evaluate(m);
SampledSpectrum F = fresnel.evaluate(dotHV);
float G = ggx.evaluateSmithG1(dirV, m) * ggx.evaluateSmithG1(dirL, m);
SampledSpectrum fs = F * D * G / (4 * dotNVdotNL);
//VLRAssert(fs.allFinite(), "fs: %s, F: %s, G, %g, D: %g, wlIdx: %u, qDir: %s, dir: %s",
// fs.toString().c_str(), F.toString().c_str(), G, D, query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
return fs;
}
RT_CALLABLE_PROGRAM float MicrofacetBRDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const MicrofacetBRDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
FresnelConductor fresnel(p.eta, p.k);
GGXMicrofacetDistribution ggx(p.alphaX, p.alphaY, p.rotation);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = entering ? dirLocal : -dirLocal;
float dotNVdotNL = dirL.z * dirV.z;
if (dotNVdotNL <= 0.0f)
return 0.0f;
Normal3D m = halfVector(dirV, dirL);
float dotHV = dot(dirV, m);
if (dotHV <= 0)
return 0.0f;
float mPDF = ggx.evaluatePDF(dirV, m);
float commonPDFTerm = 1.0f / (4 * dotHV);
float ret = commonPDFTerm * mPDF;
//VLRAssert(std::isfinite(commonPDFTerm) && std::isfinite(mPDF),
// "commonPDFTerm: %g, mPDF: %g, wlIdx: %u, qDir: %s, dir: %s",
// commonPDFTerm, mPDF, query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
return ret;
}
RT_CALLABLE_PROGRAM float MicrofacetBRDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const MicrofacetBRDF*)params;
FresnelConductor fresnel(p.eta, p.k);
float expectedDotHV = query.dirLocal.z;
return fresnel.evaluate(expectedDotHV).importance(query.wlHint);
}
// END: MicrofacetBRDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// MicrofacetBSDF
struct MicrofacetBSDF {
SampledSpectrum coeff;
SampledSpectrum etaExt;
SampledSpectrum etaInt;
float alphaX;
float alphaY;
float rotation;
};
RT_CALLABLE_PROGRAM uint32_t MicrofacetScatteringSurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(MicrofacetBSDF*)params;
auto &mat = *(const MicrofacetScatteringSurfaceMaterial*)matDesc;
p.coeff = calcNode(mat.nodeCoeff, mat.immCoeff, surfPt, wls);
p.etaExt = calcNode(mat.nodeEtaExt, mat.immEtaExt, surfPt, wls);
p.etaInt = calcNode(mat.nodeEtaInt, mat.immEtaInt, surfPt, wls);
optix::float3 roughnessAnisotropyRotation = calcNode(mat.nodeRoughnessAnisotropyRotation,
optix::make_float3(mat.immRoughness, mat.immAnisotropy, mat.immRotation),
surfPt, wls);
float alpha = pow2(roughnessAnisotropyRotation.x);
float aspect = std::sqrt(1 - 0.9f * roughnessAnisotropyRotation.y);
p.alphaX = std::fmax(0.001f, alpha / aspect);
p.alphaY = std::fmax(0.001f, alpha * aspect);
p.rotation = 2 * M_PIf * roughnessAnisotropyRotation.z;
return sizeof(MicrofacetBSDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum MicrofacetBSDF_getBaseColor(const uint32_t* params) {
auto &p = *(const MicrofacetBSDF*)params;
return p.coeff;
}
RT_CALLABLE_PROGRAM bool MicrofacetBSDF_matches(const uint32_t* params, DirectionType flags) {
DirectionType m_type = DirectionType::WholeSphere() | DirectionType::HighFreq();
return m_type.matches(flags);
}
RT_CALLABLE_PROGRAM SampledSpectrum MicrofacetBSDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const MicrofacetBSDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
const SampledSpectrum &eEnter = entering ? p.etaExt : p.etaInt;
const SampledSpectrum &eExit = entering ? p.etaInt : p.etaExt;
FresnelDielectric fresnel(eEnter, eExit);
GGXMicrofacetDistribution ggx(p.alphaX, p.alphaY, p.rotation);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
// EN: sample a half vector.
Normal3D m;
float mPDF;
float D = ggx.sample(dirV, uDir[0], uDir[1], &m, &mPDF);
float dotHV = dot(dirV, m);
if (dotHV <= 0 || std::isnan(D)) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
// EN: calculate the Fresnel term using the sampled half vector, then select reflection or transmission.
SampledSpectrum F = fresnel.evaluate(dotHV);
float reflectProb = F.importance(query.wlHint);
if (query.dirTypeFilter.isReflection())
reflectProb = 1.0f;
if (query.dirTypeFilter.isTransmission())
reflectProb = 0.0f;
if (uComponent < reflectProb) {
// EN: calculate a resulting direction.
Vector3D dirL = 2 * dotHV * m - dirV;
result->dirLocal = entering ? dirL : -dirL;
if (dirL.z * dirV.z <= 0) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
float commonPDFTerm = reflectProb / (4 * dotHV);
result->dirPDF = commonPDFTerm * mPDF;
result->sampledType = DirectionType::Reflection() | DirectionType::HighFreq();
float G = ggx.evaluateSmithG1(dirV, m) * ggx.evaluateSmithG1(dirL, m);
SampledSpectrum fs = F * D * G / (4 * dirV.z * dirL.z);
//VLRAssert(fs.allFinite(), "fs: %s, F: %g, %g, %g, G, %g, D: %g, wlIdx: %u, qDir: (%g, %g, %g), rDir: (%g, %g, %g)",
// fs.toString().c_str(), F.toString().c_str(), G, D, query.wlHint,
// dirV.x, dirV.y, dirV.z, dirL.x, dirL.y, dirL.z);
return fs;
}
else {
// EN: calculate a resulting direction.
float recRelIOR = eEnter[query.wlHint] / eExit[query.wlHint];
float innerRoot = 1 + recRelIOR * recRelIOR * (dotHV * dotHV - 1);
if (innerRoot < 0) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
Vector3D dirL = (recRelIOR * dotHV - std::sqrt(innerRoot)) * m - recRelIOR * dirV;
result->dirLocal = entering ? dirL : -dirL;
if (dirL.z * dirV.z >= 0) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
float dotHL = dot(dirL, m);
float commonPDFTerm = (1 - reflectProb) / ::pow(eEnter[query.wlHint] * dotHV + eExit[query.wlHint] * dotHL, 2);
result->dirPDF = commonPDFTerm * mPDF * eExit[query.wlHint] * eExit[query.wlHint] * ::fabs(dotHL);
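// The factor eExit^2 * |dot(dirL, m)| / (eEnter * dot(dirV, m) + eExit * dot(dirL, m))^2 applied to
// mPDF is, presumably, the half-vector-to-refracted-direction Jacobian of Walter et al. 2007; it
// converts the sampled microfacet-normal PDF into a solid-angle PDF over transmitted directions.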
result->sampledType = DirectionType::Transmission() | DirectionType::HighFreq();
// EN: calculate the value of each term of the microfacet BSDF for each wavelength component.
SampledSpectrum ret = SampledSpectrum::Zero();
for (int wlIdx = 0; wlIdx < SampledSpectrum::NumComponents(); ++wlIdx) {
Normal3D m_wl = normalize(-(eEnter[wlIdx] * dirV + eExit[wlIdx] * dirL) * (entering ? 1 : -1));
float dotHV_wl = dot(dirV, m_wl);
float dotHL_wl = dot(dirL, m_wl);
float F_wl = fresnel.evaluate(dotHV_wl, wlIdx);
float G_wl = ggx.evaluateSmithG1(dirV, m_wl) * ggx.evaluateSmithG1(dirL, m_wl);
float D_wl = ggx.evaluate(m_wl);
ret[wlIdx] = ::fabs(dotHV_wl * dotHL_wl) * (1 - F_wl) * G_wl * D_wl / ::pow(eEnter[wlIdx] * dotHV_wl + eExit[wlIdx] * dotHL_wl, 2);
//VLRAssert(std::isfinite(ret[wlIdx]), "fs: %g, F: %g, G, %g, D: %g, wlIdx: %u, qDir: %s",
// ret[wlIdx], F_wl, G_wl, D_wl, query.wlHint, dirV.toString().c_str());
}
ret /= ::fabs(dirV.z * dirL.z);
ret *= eEnter * eEnter;
//ret *= query.adjoint ? (eExit * eExit) : (eEnter * eEnter);// adjoint: need to cancel eEnter^2 / eExit^2 => eEnter^2 * (eExit^2 / eEnter^2)
//VLRAssert(ret.allFinite(), "fs: %s, wlIdx: %u, qDir: %s, rDir: %s",
// ret.toString().c_str(), query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
return ret;
}
}
RT_CALLABLE_PROGRAM SampledSpectrum MicrofacetBSDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const MicrofacetBSDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
const SampledSpectrum &eEnter = entering ? p.etaExt : p.etaInt;
const SampledSpectrum &eExit = entering ? p.etaInt : p.etaExt;
FresnelDielectric fresnel(eEnter, eExit);
GGXMicrofacetDistribution ggx(p.alphaX, p.alphaY, p.rotation);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = entering ? dirLocal : -dirLocal;
float dotNVdotNL = dirL.z * dirV.z;
if (dotNVdotNL > 0 && query.dirTypeFilter.matches(DirectionType::Reflection() | DirectionType::AllFreq())) {
Normal3D m = halfVector(dirV, dirL);
float dotHV = dot(dirV, m);
float D = ggx.evaluate(m);
SampledSpectrum F = fresnel.evaluate(dotHV);
float G = ggx.evaluateSmithG1(dirV, m) * ggx.evaluateSmithG1(dirL, m);
SampledSpectrum fs = F * D * G / (4 * dotNVdotNL);
//VLRAssert(fs.allFinite(), "fs: %s, F: %s, G, %g, D: %g, wlIdx: %u, qDir: %s, dir: %s",
// fs.toString().c_str(), F.toString().c_str(), G, D, query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
return fs;
}
else if (dotNVdotNL < 0 && query.dirTypeFilter.matches(DirectionType::Transmission() | DirectionType::AllFreq())) {
SampledSpectrum ret = SampledSpectrum::Zero();
for (int wlIdx = 0; wlIdx < SampledSpectrum::NumComponents(); ++wlIdx) {
Normal3D m_wl = normalize(-(eEnter[wlIdx] * dirV + eExit[wlIdx] * dirL) * (entering ? 1 : -1));
float dotHV_wl = dot(dirV, m_wl);
float dotHL_wl = dot(dirL, m_wl);
float F_wl = fresnel.evaluate(dotHV_wl, wlIdx);
float G_wl = ggx.evaluateSmithG1(dirV, m_wl) * ggx.evaluateSmithG1(dirL, m_wl);
float D_wl = ggx.evaluate(m_wl);
ret[wlIdx] = ::fabs(dotHV_wl * dotHL_wl) * (1 - F_wl) * G_wl * D_wl / ::pow(eEnter[wlIdx] * dotHV_wl + eExit[wlIdx] * dotHL_wl, 2);
//VLRAssert(std::isfinite(ret[wlIdx]), "fs: %g, F: %g, G, %g, D: %g, wlIdx: %u, qDir: %s, dir: %s",
// ret[wlIdx], F_wl, G_wl, D_wl, query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
}
ret /= ::fabs(dotNVdotNL);
ret *= eEnter * eEnter;
//ret *= query.adjoint ? (eExit * eExit) : (eEnter * eEnter);// !adjoint: eExit^2 * (eEnter / eExit)^2
//VLRAssert(ret.allFinite(), "fs: %s, wlIdx: %u, qDir: %s, dir: %s",
// ret.toString().c_str(), query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
return ret;
}
return SampledSpectrum::Zero();
}
RT_CALLABLE_PROGRAM float MicrofacetBSDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const MicrofacetBSDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
const SampledSpectrum &eEnter = entering ? p.etaExt : p.etaInt;
const SampledSpectrum &eExit = entering ? p.etaInt : p.etaExt;
FresnelDielectric fresnel(eEnter, eExit);
GGXMicrofacetDistribution ggx(p.alphaX, p.alphaY, p.rotation);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = entering ? dirLocal : -dirLocal;
float dotNVdotNL = dirL.z * dirV.z;
if (dotNVdotNL == 0)
return 0.0f;
Normal3D m;
if (dotNVdotNL > 0)
m = halfVector(dirV, dirL);
else
m = normalize(-(eEnter[query.wlHint] * dirV + eExit[query.wlHint] * dirL));
float dotHV = dot(dirV, m);
if (dotHV <= 0)
return 0.0f;
float mPDF = ggx.evaluatePDF(dirV, m);
SampledSpectrum F = fresnel.evaluate(dotHV);
float reflectProb = F.importance(query.wlHint);
if (query.dirTypeFilter.isReflection())
reflectProb = 1.0f;
if (query.dirTypeFilter.isTransmission())
reflectProb = 0.0f;
if (dotNVdotNL > 0) {
float commonPDFTerm = reflectProb / (4 * dotHV);
//VLRAssert(std::isfinite(commonPDFTerm) && std::isfinite(mPDF),
// "commonPDFTerm: %g, mPDF: %g, F: %s, wlIdx: %u, qDir: %s, dir: %s",
// commonPDFTerm, mPDF, F.toString().c_str(), query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
return commonPDFTerm * mPDF;
}
else {
float dotHL = dot(dirL, m);
float commonPDFTerm = (1 - reflectProb) / ::pow(eEnter[query.wlHint] * dotHV + eExit[query.wlHint] * dotHL, 2);
//VLRAssert(std::isfinite(commonPDFTerm) && std::isfinite(mPDF),
// "commonPDFTerm: %g, mPDF: %g, F: %s, wlIdx: %u, qDir: %s, dir: %s",
// commonPDFTerm, mPDF, F.toString().c_str(), query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
return commonPDFTerm * mPDF * eExit[query.wlHint] * eExit[query.wlHint] * ::fabs(dotHL);
}
}
RT_CALLABLE_PROGRAM float MicrofacetBSDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const MicrofacetBSDF*)params;
return p.coeff.importance(query.wlHint);
}
// END: MicrofacetBSDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// LambertianBSDF
struct LambertianBSDF {
SampledSpectrum coeff;
float F0;
};
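// Despite the name, this BSDF behaves as a thin translucent diffuse model: a Schlick Fresnel term
// with reflectance F0 splits energy between a cosine-weighted reflection lobe and a cosine-weighted
// transmission lobe, both modulated by coeff (see sample/evaluate below).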
RT_CALLABLE_PROGRAM uint32_t LambertianScatteringSurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(LambertianBSDF*)params;
auto &mat = *(const LambertianScatteringSurfaceMaterial*)matDesc;
p.coeff = calcNode(mat.nodeCoeff, mat.immCoeff, surfPt, wls);
p.F0 = calcNode(mat.nodeF0, mat.immF0, surfPt, wls);
return sizeof(LambertianBSDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum LambertianBSDF_getBaseColor(const uint32_t* params) {
auto &p = *(const LambertianBSDF*)params;
return p.coeff;
}
RT_CALLABLE_PROGRAM bool LambertianBSDF_matches(const uint32_t* params, DirectionType flags) {
DirectionType m_type = DirectionType::WholeSphere() | DirectionType::LowFreq();
return m_type.matches(flags);
}
RT_CALLABLE_PROGRAM SampledSpectrum LambertianBSDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const LambertianBSDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
FresnelSchlick fresnel(p.F0);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = cosineSampleHemisphere(uDir[0], uDir[1]);
result->dirPDF = dirL.z / M_PIf;
SampledSpectrum F = fresnel.evaluate(query.dirLocal.z);
float reflectProb = F.importance(query.wlHint);
if (query.dirTypeFilter.isReflection())
reflectProb = 1.0f;
if (query.dirTypeFilter.isTransmission())
reflectProb = 0.0f;
if (uComponent < reflectProb) {
result->dirLocal = entering ? dirL : -dirL;
result->sampledType = DirectionType::Reflection() | DirectionType::LowFreq();
SampledSpectrum fs = F * p.coeff / M_PIf;
result->dirPDF *= reflectProb;
return fs;
}
else {
result->dirLocal = entering ? -dirL : dirL;
result->sampledType = DirectionType::Transmission() | DirectionType::LowFreq();
SampledSpectrum fs = (SampledSpectrum::One() - F) * p.coeff / M_PIf;
result->dirPDF *= (1 - reflectProb);
return fs;
}
}
RT_CALLABLE_PROGRAM SampledSpectrum LambertianBSDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const LambertianBSDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
FresnelSchlick fresnel(p.F0);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = entering ? dirLocal : -dirLocal;
SampledSpectrum F = fresnel.evaluate(query.dirLocal.z);
if (dirV.z * dirL.z > 0.0f) {
SampledSpectrum fs = F * p.coeff / M_PIf;
return fs;
}
else {
SampledSpectrum fs = (SampledSpectrum::One() - F) * p.coeff / M_PIf;
return fs;
}
}
RT_CALLABLE_PROGRAM float LambertianBSDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const LambertianBSDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
FresnelSchlick fresnel(p.F0);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = entering ? dirLocal : -dirLocal;
SampledSpectrum F = fresnel.evaluate(query.dirLocal.z);
float reflectProb = F.importance(query.wlHint);
if (query.dirTypeFilter.isReflection())
reflectProb = 1.0f;
if (query.dirTypeFilter.isTransmission())
reflectProb = 0.0f;
if (dirV.z * dirL.z > 0.0f) {
float dirPDF = reflectProb * dirL.z / M_PIf;
return dirPDF;
}
else {
float dirPDF = (1 - reflectProb) * ::fabs(dirL.z) / M_PIf;
return dirPDF;
}
}
RT_CALLABLE_PROGRAM float LambertianBSDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const LambertianBSDF*)params;
return p.coeff.importance(query.wlHint);
}
// END: LambertianBSDF
// ----------------------------------------------------------------
#define USE_HEIGHT_CORRELATED_SMITH
RT_FUNCTION SampledSpectrum diffuseAndSpecularBRDF_sampleInternal(const SampledSpectrum &diffuseColor, const SampledSpectrum &specularF0Color, float roughness,
const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
float alpha = roughness * roughness;
GGXMicrofacetDistribution ggx(alpha, alpha, 0.0f);
bool entering = query.dirLocal.z >= 0.0f;
Vector3D dirL;
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
float expectedF_D90 = 0.5f * roughness + 2 * roughness * query.dirLocal.z * query.dirLocal.z;
float oneMinusDotVN5 = ::pow(1 - dirV.z, 5);
float expectedDiffuseFresnel = lerp(1.0f, expectedF_D90, oneMinusDotVN5);
float iBaseColor = diffuseColor.importance(query.wlHint) * expectedDiffuseFresnel * expectedDiffuseFresnel * lerp(1.0f, 1.0f / 1.51f, roughness);
float expectedOneMinusDotVH5 = ::pow(1 - dirV.z, 5);
float iSpecularF0 = specularF0Color.importance(query.wlHint);
float diffuseWeight = iBaseColor;
float specularWeight = lerp(iSpecularF0, 1.0f, expectedOneMinusDotVH5);
float weights[] = { diffuseWeight, specularWeight };
float probSelection;
float sumWeights = 0.0f;
uint32_t component = sampleDiscrete(weights, 2, uComponent, &probSelection, &sumWeights, &uComponent);
float diffuseDirPDF, specularDirPDF;
SampledSpectrum fs;
Normal3D m;
float dotLH;
float D;
if (component == 0) {
result->sampledType = DirectionType::Reflection() | DirectionType::LowFreq();
// EN: sample based on cosine distribution.
dirL = cosineSampleHemisphere(uDir[0], uDir[1]);
diffuseDirPDF = dirL.z / M_PIf;
// EN: calculate PDFs to generate the sampled direction from the other distributions.
m = halfVector(dirL, dirV);
dotLH = dot(dirL, m);
float commonPDFTerm = 1.0f / (4 * dotLH);
specularDirPDF = commonPDFTerm * ggx.evaluatePDF(dirV, m);
D = ggx.evaluate(m);
}
else if (component == 1) {
result->sampledType = DirectionType::Reflection() | DirectionType::HighFreq();
// ----------------------------------------------------------------
// EN: sample based on the base specular microfacet distribution.
float mPDF;
D = ggx.sample(dirV, uDir[0], uDir[1], &m, &mPDF);
float dotVH = dot(dirV, m);
dotLH = dotVH;
dirL = 2 * dotVH * m - dirV;
if (dirL.z * dirV.z <= 0) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
float commonPDFTerm = 1.0f / (4 * dotLH);
specularDirPDF = commonPDFTerm * mPDF;
// ----------------------------------------------------------------
// EN: calculate PDFs to generate the sampled direction from the other distributions.
diffuseDirPDF = dirL.z / M_PIf;
}
float oneMinusDotLH5 = ::pow(1 - dotLH, 5);
#if defined(USE_HEIGHT_CORRELATED_SMITH)
float G = ggx.evaluateHeightCorrelatedSmithG(dirL, dirV, m);
#else
float G = ggx.evaluateSmithG1(dirL, m) * ggx.evaluateSmithG1(dirV, m);
#endif
SampledSpectrum F = lerp(specularF0Color, SampledSpectrum::One(), oneMinusDotLH5);
float microfacetDenom = 4 * dirL.z * dirV.z;
SampledSpectrum specularValue = F * ((D * G) / microfacetDenom);
float F_D90 = 0.5f * roughness + 2 * roughness * dotLH * dotLH;
float oneMinusDotLN5 = ::pow(1 - dirL.z, 5);
float diffuseFresnelOut = lerp(1.0f, F_D90, oneMinusDotVN5);
float diffuseFresnelIn = lerp(1.0f, F_D90, oneMinusDotLN5);
SampledSpectrum diffuseValue = diffuseColor * (diffuseFresnelOut * diffuseFresnelIn * lerp(1.0f, 1.0f / 1.51f, roughness) / M_PIf);
SampledSpectrum ret = diffuseValue + specularValue;
result->dirLocal = entering ? dirL : -dirL;
// PDF based on the single-sample model MIS.
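// In the one-sample model the combined PDF is
//   pdf(dirL) = (diffuseWeight * pdf_diffuse(dirL) + specularWeight * pdf_specular(dirL)) / sumWeights,
// which is exactly what the next line evaluates with the importance-based weights chosen above.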
result->dirPDF = (diffuseDirPDF * diffuseWeight + specularDirPDF * specularWeight) / sumWeights;
return ret;
}
RT_FUNCTION SampledSpectrum diffuseAndSpecularBRDF_evaluateInternal(const SampledSpectrum &diffuseColor, const SampledSpectrum &specularF0Color, float roughness,
const BSDFQuery &query, const Vector3D &dirLocal) {
float alpha = roughness * roughness;
GGXMicrofacetDistribution ggx(alpha, alpha, 0.0f);
if (dirLocal.z * query.dirLocal.z <= 0) {
return SampledSpectrum::Zero();
}
bool entering = query.dirLocal.z >= 0.0f;
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = entering ? dirLocal : -dirLocal;
Normal3D m = halfVector(dirL, dirV);
float dotLH = dot(dirL, m);
float oneMinusDotLH5 = ::pow(1 - dotLH, 5);
float D = ggx.evaluate(m);
#if defined(USE_HEIGHT_CORRELATED_SMITH)
float G = ggx.evaluateHeightCorrelatedSmithG(dirL, dirV, m);
#else
float G = ggx.evaluateSmithG1(dirL, m) * ggx.evaluateSmithG1(dirV, m);
#endif
SampledSpectrum F = lerp(specularF0Color, SampledSpectrum::One(), oneMinusDotLH5);
float microfacetDenom = 4 * dirL.z * dirV.z;
SampledSpectrum specularValue = F * ((D * G) / microfacetDenom);
float F_D90 = 0.5f * roughness + 2 * roughness * dotLH * dotLH;
float oneMinusDotVN5 = ::pow(1 - dirV.z, 5);
float oneMinusDotLN5 = ::pow(1 - dirL.z, 5);
float diffuseFresnelOut = lerp(1.0f, F_D90, oneMinusDotVN5);
float diffuseFresnelIn = lerp(1.0f, F_D90, oneMinusDotLN5);
SampledSpectrum diffuseValue = diffuseColor * (diffuseFresnelOut * diffuseFresnelIn * lerp(1.0f, 1.0f / 1.51f, roughness) / M_PIf);
SampledSpectrum ret = diffuseValue + specularValue;
return ret;
}
RT_FUNCTION float diffuseAndSpecularBRDF_evaluatePDFInternal(const SampledSpectrum &diffuseColor, const SampledSpectrum &specularF0Color, float roughness,
const BSDFQuery &query, const Vector3D &dirLocal) {
float alpha = roughness * roughness;
GGXMicrofacetDistribution ggx(alpha, alpha, 0.0f);
bool entering = query.dirLocal.z >= 0.0f;
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = entering ? dirLocal : -dirLocal;
Normal3D m = halfVector(dirL, dirV);
float dotLH = dot(dirL, m);
float commonPDFTerm = 1.0f / (4 * dotLH);
float expectedF_D90 = 0.5f * roughness + 2 * roughness * query.dirLocal.z * query.dirLocal.z;
float oneMinusDotVN5 = ::pow(1 - dirV.z, 5);
float expectedDiffuseFresnel = lerp(1.0f, expectedF_D90, oneMinusDotVN5);
float iBaseColor = diffuseColor.importance(query.wlHint) * expectedDiffuseFresnel * expectedDiffuseFresnel * lerp(1.0f, 1.0f / 1.51f, roughness);
float expectedOneMinusDotVH5 = ::pow(1 - dirV.z, 5);
float iSpecularF0 = specularF0Color.importance(query.wlHint);
float diffuseWeight = iBaseColor;
float specularWeight = lerp(iSpecularF0, 1.0f, expectedOneMinusDotVH5);
float sumWeights = diffuseWeight + specularWeight;
float diffuseDirPDF = dirL.z / M_PIf;
float specularDirPDF = commonPDFTerm * ggx.evaluatePDF(dirV, m);
float ret = (diffuseDirPDF * diffuseWeight + specularDirPDF * specularWeight) / sumWeights;
return ret;
}
RT_FUNCTION float diffuseAndSpecularBRDF_weightInternal(const SampledSpectrum &diffuseColor, const SampledSpectrum &specularF0Color, float roughness,
const BSDFQuery &query) {
bool entering = query.dirLocal.z >= 0.0f;
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
float expectedF_D90 = 0.5f * roughness + 2 * roughness * query.dirLocal.z * query.dirLocal.z;
float oneMinusDotVN5 = ::pow(1 - dirV.z, 5);
float expectedDiffuseFresnel = lerp(1.0f, expectedF_D90, oneMinusDotVN5);
float iBaseColor = diffuseColor.importance(query.wlHint) * expectedDiffuseFresnel * expectedDiffuseFresnel * lerp(1.0f, 1.0f / 1.51f, roughness);
float expectedOneMinusDotVH5 = ::pow(1 - dirV.z, 5);
float iSpecularF0 = specularF0Color.importance(query.wlHint);
float diffuseWeight = iBaseColor;
float specularWeight = lerp(iSpecularF0, 1.0f, expectedOneMinusDotVH5);
return diffuseWeight + specularWeight;
}
// ----------------------------------------------------------------
// UE4 (Modified) BRDF
struct UE4BRDF {
SampledSpectrum baseColor;
float roughness;
float metallic;
};
RT_CALLABLE_PROGRAM uint32_t UE4SurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(UE4BRDF*)params;
auto &mat = *(const UE4SurfaceMaterial*)matDesc;
p.baseColor = calcNode(mat.nodeBaseColor, mat.immBaseColor, surfPt, wls);
optix::float3 occlusionRoughnessMetallic = calcNode(mat.nodeOcclusionRoughnessMetallic,
optix::make_float3(mat.immOcclusion, mat.immRoughness, mat.immMetallic),
surfPt, wls);
p.roughness = std::fmax(0.01f, occlusionRoughnessMetallic.y);
p.metallic = occlusionRoughnessMetallic.z;
return sizeof(UE4BRDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum UE4BRDF_getBaseColor(const uint32_t* params) {
auto &p = *(const UE4BRDF*)params;
return p.baseColor;
}
RT_CALLABLE_PROGRAM bool UE4BRDF_matches(const uint32_t* params, DirectionType flags) {
DirectionType m_type = DirectionType::Reflection() | DirectionType::LowFreq() | DirectionType::HighFreq();
return m_type.matches(flags);
}
RT_CALLABLE_PROGRAM SampledSpectrum UE4BRDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const UE4BRDF*)params;
const float specular = 0.5f;
SampledSpectrum diffuseColor = p.baseColor * (1 - p.metallic);
SampledSpectrum specularF0Color = lerp(0.08f * specular * SampledSpectrum::One(), p.baseColor, p.metallic);
return diffuseAndSpecularBRDF_sampleInternal(diffuseColor, specularF0Color, p.roughness,
query, uComponent, uDir, result);
}
RT_CALLABLE_PROGRAM SampledSpectrum UE4BRDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const UE4BRDF*)params;
const float specular = 0.5f;
SampledSpectrum diffuseColor = p.baseColor * (1 - p.metallic);
SampledSpectrum specularF0Color = lerp(0.08f * specular * SampledSpectrum::One(), p.baseColor, p.metallic);
return diffuseAndSpecularBRDF_evaluateInternal(diffuseColor, specularF0Color, p.roughness,
query, dirLocal);
}
RT_CALLABLE_PROGRAM float UE4BRDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const UE4BRDF*)params;
const float specular = 0.5f;
SampledSpectrum diffuseColor = p.baseColor * (1 - p.metallic);
SampledSpectrum specularF0Color = lerp(0.08f * specular * SampledSpectrum::One(), p.baseColor, p.metallic);
return diffuseAndSpecularBRDF_evaluatePDFInternal(diffuseColor, specularF0Color, p.roughness,
query, dirLocal);
}
RT_CALLABLE_PROGRAM float UE4BRDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const UE4BRDF*)params;
const float specular = 0.5f;
SampledSpectrum diffuseColor = p.baseColor * (1 - p.metallic);
SampledSpectrum specularF0Color = lerp(0.08f * specular * SampledSpectrum::One(), p.baseColor, p.metallic);
return diffuseAndSpecularBRDF_weightInternal(diffuseColor, specularF0Color, p.roughness,
query);
}
// END: UE4 (Modified) BRDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// Old style BRDF
struct OldStyleBRDF {
SampledSpectrum diffuseColor;
SampledSpectrum specularColor;
float glossiness;
};
RT_CALLABLE_PROGRAM uint32_t OldStyleSurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(OldStyleBRDF*)params;
auto &mat = *(const OldStyleSurfaceMaterial*)matDesc;
p.diffuseColor = calcNode(mat.nodeDiffuseColor, mat.immDiffuseColor, surfPt, wls);
p.specularColor = calcNode(mat.nodeSpecularColor, mat.immSpecularColor, surfPt, wls);
p.glossiness = std::fmin(0.99f, calcNode(mat.nodeGlossiness, mat.immGlossiness, surfPt, wls));
return sizeof(OldStyleBRDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum OldStyleBRDF_getBaseColor(const uint32_t* params) {
auto &p = *(const OldStyleBRDF*)params;
return p.diffuseColor + p.specularColor;
}
RT_CALLABLE_PROGRAM bool OldStyleBRDF_matches(const uint32_t* params, DirectionType flags) {
DirectionType m_type = DirectionType::Reflection() | DirectionType::LowFreq() | DirectionType::HighFreq();
return m_type.matches(flags);
}
RT_CALLABLE_PROGRAM SampledSpectrum OldStyleBRDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const OldStyleBRDF*)params;
return diffuseAndSpecularBRDF_sampleInternal(p.diffuseColor, p.specularColor, 1 - p.glossiness,
query, uComponent, uDir, result);
}
RT_CALLABLE_PROGRAM SampledSpectrum OldStyleBRDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const OldStyleBRDF*)params;
return diffuseAndSpecularBRDF_evaluateInternal(p.diffuseColor, p.specularColor, 1 - p.glossiness,
query, dirLocal);
}
RT_CALLABLE_PROGRAM float OldStyleBRDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const OldStyleBRDF*)params;
return diffuseAndSpecularBRDF_evaluatePDFInternal(p.diffuseColor, p.specularColor, 1 - p.glossiness,
query, dirLocal);
}
RT_CALLABLE_PROGRAM float OldStyleBRDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const OldStyleBRDF*)params;
return diffuseAndSpecularBRDF_weightInternal(p.diffuseColor, p.specularColor, 1 - p.glossiness,
query);
}
// END: Old style BRDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// NullEDF
RT_CALLABLE_PROGRAM uint32_t NullEDF_setupEDF(const uint32_t* matDesc, const SurfacePoint &surfPt, uint32_t* params) {
return 0;
}
RT_CALLABLE_PROGRAM SampledSpectrum NullEDF_evaluateEmittanceInternal(const uint32_t* params) {
return SampledSpectrum::Zero();
}
RT_CALLABLE_PROGRAM SampledSpectrum NullEDF_evaluateInternal(const uint32_t* params, const EDFQuery &query, const Vector3D &dirLocal) {
return SampledSpectrum::Zero();
}
// END: NullEDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// DiffuseEDF
struct DiffuseEDF {
SampledSpectrum emittance;
};
RT_CALLABLE_PROGRAM uint32_t DiffuseEmitterSurfaceMaterial_setupEDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(DiffuseEDF*)params;
auto &mat = *(const DiffuseEmitterSurfaceMaterial*)matDesc;
p.emittance = calcNode(mat.nodeEmittance, mat.immEmittance, surfPt, wls);
return sizeof(DiffuseEDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum DiffuseEDF_evaluateEmittanceInternal(const uint32_t* params) {
auto &p = *(const DiffuseEDF*)params;
return p.emittance;
}
RT_CALLABLE_PROGRAM SampledSpectrum DiffuseEDF_evaluateInternal(const uint32_t* params, const EDFQuery &query, const Vector3D &dirLocal) {
return SampledSpectrum(dirLocal.z > 0.0f ? 1.0f / M_PIf : 0.0f);
}
// END: DiffuseEDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// MultiBSDF / MultiEDF
// bsdf0-3: param offsets
// numBSDFs
// --------------------------------
// BSDF0 procedure set index
// BSDF0 params
// ...
// BSDF3 procedure set index
// BSDF3 params
struct MultiBSDF {
struct {
unsigned int bsdf0 : 6;
unsigned int bsdf1 : 6;
unsigned int bsdf2 : 6;
unsigned int bsdf3 : 6;
unsigned int numBSDFs : 8;
};
};
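// Illustrative layout for two sub-BSDFs A and B (offsets are in 32-bit words):
//   params[0]             packed MultiBSDF header (bsdf0 = 1, bsdf1 = 2 + sizeA, bsdf2 = bsdf3 = 0, numBSDFs = 2)
//   params[1]             BSDF procedure set index of A
//   params[2 .. 1+sizeA]  A's parameters (sizeA words, as returned by A's setup program)
//   params[2+sizeA]       BSDF procedure set index of B
//   params[3+sizeA .. ]   B's parameters
// Each recorded offset points at the procedure set index word, hence the "bsdf + 1" when calling into it.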
RT_CALLABLE_PROGRAM uint32_t MultiSurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(MultiBSDF*)params;
auto &mat = *(const MultiSurfaceMaterial*)matDesc;
uint32_t baseIndex = sizeof(MultiBSDF) / 4;
uint32_t bsdfOffsets[4] = { 0, 0, 0, 0 };
for (int i = 0; i < mat.numSubMaterials; ++i) {
bsdfOffsets[i] = baseIndex;
const SurfaceMaterialDescriptor subMatDesc = pv_materialDescriptorBuffer[mat.subMatIndices[i]];
ProgSigSetupBSDF setupBSDF = (ProgSigSetupBSDF)subMatDesc.progSetupBSDF;
*(params + baseIndex++) = subMatDesc.bsdfProcedureSetIndex;
baseIndex += setupBSDF(subMatDesc.data, surfPt, wls, params + baseIndex);
}
p.bsdf0 = bsdfOffsets[0];
p.bsdf1 = bsdfOffsets[1];
p.bsdf2 = bsdfOffsets[2];
p.bsdf3 = bsdfOffsets[3];
p.numBSDFs = mat.numSubMaterials;
//vlrDevPrintf("%u, %u, %u, %u, %u mats\n", p.bsdf0, p.bsdf1, p.bsdf2, p.bsdf3, p.numBSDFs);
return baseIndex;
}
RT_CALLABLE_PROGRAM SampledSpectrum MultiBSDF_getBaseColor(const uint32_t* params) {
auto &p = *(const MultiBSDF*)params;
uint32_t bsdfOffsets[4] = { p.bsdf0, p.bsdf1, p.bsdf2, p.bsdf3 };
SampledSpectrum ret;
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFGetBaseColor getBaseColor = (ProgSigBSDFGetBaseColor)procSet.progGetBaseColor;
ret += getBaseColor(bsdf + 1);
}
return ret;
}
RT_CALLABLE_PROGRAM bool MultiBSDF_matches(const uint32_t* params, DirectionType flags) {
auto &p = *(const MultiBSDF*)params;
uint32_t bsdfOffsets[4] = { p.bsdf0, p.bsdf1, p.bsdf2, p.bsdf3 };
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFmatches matches = (ProgSigBSDFmatches)procSet.progMatches;
if (matches(bsdf + 1, flags))
return true;
}
return false;
}
RT_CALLABLE_PROGRAM SampledSpectrum MultiBSDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const MultiBSDF*)params;
uint32_t bsdfOffsets[4] = { p.bsdf0, p.bsdf1, p.bsdf2, p.bsdf3 };
float weights[4];
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFWeightInternal weightInternal = (ProgSigBSDFWeightInternal)procSet.progWeightInternal;
weights[i] = weightInternal(bsdf + 1, query);
}
// EN: Based on the weight of each BSDF, select a BSDF from which to sample a direction.
float tempProb;
float sumWeights;
uint32_t idx = sampleDiscrete(weights, p.numBSDFs, uComponent, &tempProb, &sumWeights, &uComponent);
if (sumWeights == 0.0f) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
const uint32_t* selectedBSDF = params + bsdfOffsets[idx];
uint32_t selProcIdx = *(const uint32_t*)selectedBSDF;
const BSDFProcedureSet selProcSet = pv_bsdfProcedureSetBuffer[selProcIdx];
ProgSigBSDFSampleInternal sampleInternal = (ProgSigBSDFSampleInternal)selProcSet.progSampleInternal;
// EN: sample a direction from the selected BSDF.
SampledSpectrum value = sampleInternal(selectedBSDF + 1, query, uComponent, uDir, result);
result->dirPDF *= weights[idx];
if (result->dirPDF == 0.0f) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
// EN: calculate the total of BSDF values and a PDF based on the single-sample model MIS for the sampled direction.
if (!result->sampledType.isDelta()) {
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFmatches matches = (ProgSigBSDFmatches)procSet.progMatches;
ProgSigBSDFEvaluatePDFInternal evaluatePDFInternal = (ProgSigBSDFEvaluatePDFInternal)procSet.progEvaluatePDFInternal;
if (i != idx && matches(bsdf + 1, query.dirTypeFilter))
result->dirPDF += evaluatePDFInternal(bsdf + 1, query, result->dirLocal) * weights[i];
}
BSDFQuery mQuery = query;
mQuery.dirTypeFilter &= sideTest(query.geometricNormalLocal, query.dirLocal, result->dirLocal);
value = SampledSpectrum::Zero();
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFmatches matches = (ProgSigBSDFmatches)procSet.progMatches;
ProgSigBSDFEvaluateInternal evaluateInternal = (ProgSigBSDFEvaluateInternal)procSet.progEvaluateInternal;
if (!matches(bsdf + 1, mQuery.dirTypeFilter))
continue;
value += evaluateInternal(bsdf + 1, mQuery, result->dirLocal);
}
}
result->dirPDF /= sumWeights;
return value;
}
RT_CALLABLE_PROGRAM SampledSpectrum MultiBSDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const MultiBSDF*)params;
uint32_t bsdfOffsets[4] = { p.bsdf0, p.bsdf1, p.bsdf2, p.bsdf3 };
SampledSpectrum retValue = SampledSpectrum::Zero();
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFmatches matches = (ProgSigBSDFmatches)procSet.progMatches;
ProgSigBSDFEvaluateInternal evaluateInternal = (ProgSigBSDFEvaluateInternal)procSet.progEvaluateInternal;
if (!matches(bsdf + 1, query.dirTypeFilter))
continue;
retValue += evaluateInternal(bsdf + 1, query, dirLocal);
}
return retValue;
}
RT_CALLABLE_PROGRAM float MultiBSDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const MultiBSDF*)params;
uint32_t bsdfOffsets[4] = { p.bsdf0, p.bsdf1, p.bsdf2, p.bsdf3 };
float sumWeights = 0.0f;
float weights[4];
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFWeightInternal weightInternal = (ProgSigBSDFWeightInternal)procSet.progWeightInternal;
weights[i] = weightInternal(bsdf + 1, query);
sumWeights += weights[i];
}
if (sumWeights == 0.0f)
return 0.0f;
float retPDF = 0.0f;
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFEvaluatePDFInternal evaluatePDFInternal = (ProgSigBSDFEvaluatePDFInternal)procSet.progEvaluatePDFInternal;
if (weights[i] > 0)
retPDF += evaluatePDFInternal(bsdf + 1, query, dirLocal) * weights[i];
}
retPDF /= sumWeights;
return retPDF;
}
RT_CALLABLE_PROGRAM float MultiBSDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const MultiBSDF*)params;
uint32_t bsdfOffsets[4] = { p.bsdf0, p.bsdf1, p.bsdf2, p.bsdf3 };
float ret = 0.0f;
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFWeightInternal weightInternal = (ProgSigBSDFWeightInternal)procSet.progWeightInternal;
ret += weightInternal(bsdf + 1, query);
}
return ret;
}
// edf0-3: param offsets
// numEDFs
// --------------------------------
// EDF0 procedure set index
// EDF0 params
// ...
// EDF3 procedure set index
// EDF3 params
struct MultiEDF {
struct {
unsigned int edf0 : 6;
unsigned int edf1 : 6;
unsigned int edf2 : 6;
unsigned int edf3 : 6;
unsigned int numEDFs : 8;
};
};
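// The EDF parameter layout mirrors the MultiBSDF example above: one header word, then for each
// sub-EDF a procedure set index word followed by that EDF's parameters (offsets in 32-bit words).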
RT_CALLABLE_PROGRAM uint32_t MultiSurfaceMaterial_setupEDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(MultiEDF*)params;
auto &mat = *(const MultiSurfaceMaterial*)matDesc;
uint32_t baseIndex = sizeof(MultiEDF) / 4;
uint32_t edfOffsets[4] = { 0, 0, 0, 0 };
for (int i = 0; i < mat.numSubMaterials; ++i) {
edfOffsets[i] = baseIndex;
const SurfaceMaterialDescriptor subMatDesc = pv_materialDescriptorBuffer[mat.subMatIndices[i]];
ProgSigSetupEDF setupEDF = (ProgSigSetupEDF)subMatDesc.progSetupEDF;
*(params + baseIndex++) = subMatDesc.edfProcedureSetIndex;
baseIndex += setupEDF(subMatDesc.data, surfPt, wls, params + baseIndex);
}
p.edf0 = edfOffsets[0];
p.edf1 = edfOffsets[1];
p.edf2 = edfOffsets[2];
p.edf3 = edfOffsets[3];
p.numEDFs = mat.numSubMaterials;
return baseIndex;
}
RT_CALLABLE_PROGRAM SampledSpectrum MultiEDF_evaluateEmittanceInternal(const uint32_t* params) {
auto &p = *(const MultiEDF*)params;
uint32_t edfOffsets[4] = { p.edf0, p.edf1, p.edf2, p.edf3 };
SampledSpectrum ret = SampledSpectrum::Zero();
for (int i = 0; i < p.numEDFs; ++i) {
const uint32_t* edf = params + edfOffsets[i];
uint32_t procIdx = *(const uint32_t*)edf;
const EDFProcedureSet procSet = pv_edfProcedureSetBuffer[procIdx];
ProgSigEDFEvaluateEmittanceInternal evaluateEmittanceInternal = (ProgSigEDFEvaluateEmittanceInternal)procSet.progEvaluateEmittanceInternal;
ret += evaluateEmittanceInternal(edf + 1);
}
return ret;
}
RT_CALLABLE_PROGRAM SampledSpectrum MultiEDF_evaluateInternal(const uint32_t* params, const EDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const MultiEDF*)params;
uint32_t edfOffsets[4] = { p.edf0, p.edf1, p.edf2, p.edf3 };
SampledSpectrum ret = SampledSpectrum::Zero();
SampledSpectrum sumEmittance = SampledSpectrum::Zero();
for (int i = 0; i < p.numEDFs; ++i) {
const uint32_t* edf = params + edfOffsets[i];
uint32_t procIdx = *(const uint32_t*)edf;
const EDFProcedureSet procSet = pv_edfProcedureSetBuffer[procIdx];
ProgSigEDFEvaluateEmittanceInternal evaluateEmittanceInternal = (ProgSigEDFEvaluateEmittanceInternal)procSet.progEvaluateEmittanceInternal;
ProgSigEDFEvaluateInternal evaluateInternal = (ProgSigEDFEvaluateInternal)procSet.progEvaluateInternal;
SampledSpectrum emittance = evaluateEmittanceInternal(edf + 1);
sumEmittance += emittance;
ret += emittance * evaluateInternal(edf + 1, query, dirLocal);
}
ret.safeDivide(sumEmittance);
return ret;
}
// END: MultiBSDF / MultiEDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// EnvironmentEDF
struct EnvironmentEDF {
SampledSpectrum emittance;
};
RT_CALLABLE_PROGRAM uint32_t EnvironmentEmitterSurfaceMaterial_setupEDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(EnvironmentEDF*)params;
auto &mat = *(const EnvironmentEmitterSurfaceMaterial*)matDesc;
p.emittance = calcNode(mat.nodeEmittance, mat.immEmittance, surfPt, wls) * mat.immScale;
return sizeof(EnvironmentEDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum EnvironmentEDF_evaluateEmittanceInternal(const uint32_t* params) {
auto &p = *(const EnvironmentEDF*)params;
return M_PIf * p.emittance;
}
RT_CALLABLE_PROGRAM SampledSpectrum EnvironmentEDF_evaluateInternal(const uint32_t* params, const EDFQuery &query, const Vector3D &dirLocal) {
return SampledSpectrum(dirLocal.z > 0.0f ? 1.0f / M_PIf : 0.0f);
}
// END: EnvironmentEDF
// ----------------------------------------------------------------
}
| 15170b286a15304657c34cee136fa48bae538fd4.cu | #include "kernel_common.cuh"
namespace VLR {
RT_FUNCTION DirectionType sideTest(const Normal3D &ng, const Vector3D &d0, const Vector3D &d1) {
bool reflect = dot(Vector3D(ng), d0) * dot(Vector3D(ng), d1) > 0;
return DirectionType::AllFreq() | (reflect ? DirectionType::Reflection() : DirectionType::Transmission());
}
class FresnelConductor {
SampledSpectrum m_eta;
SampledSpectrum m_k;
public:
RT_FUNCTION FresnelConductor(const SampledSpectrum &eta, const SampledSpectrum &k) : m_eta(eta), m_k(k) {}
RT_FUNCTION SampledSpectrum evaluate(float cosEnter) const {
cosEnter = std::fabs(cosEnter);
float cosEnter2 = cosEnter * cosEnter;
SampledSpectrum _2EtaCosEnter = 2.0f * m_eta * cosEnter;
SampledSpectrum tmp_f = m_eta * m_eta + m_k * m_k;
SampledSpectrum tmp = tmp_f * cosEnter2;
SampledSpectrum Rparl2 = (tmp - _2EtaCosEnter + 1) / (tmp + _2EtaCosEnter + 1);
SampledSpectrum Rperp2 = (tmp_f - _2EtaCosEnter + cosEnter2) / (tmp_f + _2EtaCosEnter + cosEnter2);
return (Rparl2 + Rperp2) / 2.0f;
}
RT_FUNCTION float evaluate(float cosEnter, uint32_t wlIdx) const {
cosEnter = std::fabs(cosEnter);
float cosEnter2 = cosEnter * cosEnter;
float _2EtaCosEnter = 2.0f * m_eta[wlIdx] * cosEnter;
float tmp_f = m_eta[wlIdx] * m_eta[wlIdx] + m_k[wlIdx] * m_k[wlIdx];
float tmp = tmp_f * cosEnter2;
float Rparl2 = (tmp - _2EtaCosEnter + 1) / (tmp + _2EtaCosEnter + 1);
float Rperp2 = (tmp_f - _2EtaCosEnter + cosEnter2) / (tmp_f + _2EtaCosEnter + cosEnter2);
return (Rparl2 + Rperp2) / 2.0f;
}
};
class FresnelDielectric {
SampledSpectrum m_etaExt;
SampledSpectrum m_etaInt;
public:
RT_FUNCTION FresnelDielectric(const SampledSpectrum &etaExt, const SampledSpectrum &etaInt) : m_etaExt(etaExt), m_etaInt(etaInt) {}
RT_FUNCTION SampledSpectrum etaExt() const { return m_etaExt; }
RT_FUNCTION SampledSpectrum etaInt() const { return m_etaInt; }
RT_FUNCTION SampledSpectrum evaluate(float cosEnter) const {
cosEnter = clamp(cosEnter, -1.0f, 1.0f);
bool entering = cosEnter > 0.0f;
const SampledSpectrum &eEnter = entering ? m_etaExt : m_etaInt;
const SampledSpectrum &eExit = entering ? m_etaInt : m_etaExt;
SampledSpectrum sinExit = eEnter / eExit * std::sqrt(std::fmax(0.0f, 1.0f - cosEnter * cosEnter));
SampledSpectrum ret = SampledSpectrum::Zero();
cosEnter = std::fabs(cosEnter);
for (int i = 0; i < SampledSpectrum::NumComponents(); ++i) {
if (sinExit[i] >= 1.0f) {
ret[i] = 1.0f;
}
else {
float cosExit = std::sqrt(std::fmax(0.0f, 1.0f - sinExit[i] * sinExit[i]));
ret[i] = evalF(eEnter[i], eExit[i], cosEnter, cosExit);
}
}
return ret;
}
RT_FUNCTION float evaluate(float cosEnter, uint32_t wlIdx) const {
cosEnter = clamp(cosEnter, -1.0f, 1.0f);
bool entering = cosEnter > 0.0f;
const float &eEnter = entering ? m_etaExt[wlIdx] : m_etaInt[wlIdx];
const float &eExit = entering ? m_etaInt[wlIdx] : m_etaExt[wlIdx];
float sinExit = eEnter / eExit * std::sqrt(std::fmax(0.0f, 1.0f - cosEnter * cosEnter));
cosEnter = std::fabs(cosEnter);
if (sinExit >= 1.0f) {
return 1.0f;
}
else {
float cosExit = std::sqrt(std::fmax(0.0f, 1.0f - sinExit * sinExit));
return evalF(eEnter, eExit, cosEnter, cosExit);
}
}
RT_FUNCTION static float evalF(float etaEnter, float etaExit, float cosEnter, float cosExit);
};
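// Unpolarized dielectric Fresnel reflectance: Rparl and Rperp below are the reflection coefficients
// for parallel- and perpendicular-polarized light from the Fresnel equations, and the unpolarized
// reflectance is the average of their squares.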
RT_FUNCTION float FresnelDielectric::evalF(float etaEnter, float etaExit, float cosEnter, float cosExit) {
float Rparl = ((etaExit * cosEnter) - (etaEnter * cosExit)) / ((etaExit * cosEnter) + (etaEnter * cosExit));
float Rperp = ((etaEnter * cosEnter) - (etaExit * cosExit)) / ((etaEnter * cosEnter) + (etaExit * cosExit));
return (Rparl * Rparl + Rperp * Rperp) / 2.0f;
}
class FresnelSchlick {
// assume vacuum-dielectric interface
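// Schlick's approximation F(cos) ~= F0 + (1 - F0) * (1 - cos)^5. For directions arriving from inside
// the medium, evaluate() first converts cosEnter to the exit-side cosine using the IOR implied by F0
// (etaExit = (1 + sqrt(F0)) / (1 - sqrt(F0))), returning full reflection on total internal reflection.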
float m_F0;
public:
RT_FUNCTION FresnelSchlick(float F0) : m_F0(F0) {}
RT_FUNCTION SampledSpectrum evaluate(float cosEnter) const {
bool entering = cosEnter >= 0;
float cosEval = cosEnter;
if (!entering) {
float sqrtF0 = std::sqrt(m_F0);
float etaExit = (1 + sqrtF0) / (1 - sqrtF0);
float invRelIOR = 1.0f / etaExit;
float sinExit2 = invRelIOR * invRelIOR * std::fmax(0.0f, 1.0f - cosEnter * cosEnter);
if (sinExit2 > 1.0f) {
return SampledSpectrum::One();
}
cosEval = std::sqrt(1 - sinExit2);
}
return SampledSpectrum(m_F0 + (1.0f - m_F0) * pow5(1 - cosEval));
}
};
class GGXMicrofacetDistribution {
float m_alpha_gx;
float m_alpha_gy;
float m_cosRt;
float m_sinRt;
public:
RT_FUNCTION GGXMicrofacetDistribution(float alpha_gx, float alpha_gy, float rotation) :
m_alpha_gx(alpha_gx), m_alpha_gy(alpha_gy) {
VLR::sincos(rotation, &m_sinRt, &m_cosRt);
}
RT_FUNCTION float evaluate(const Normal3D &m) {
Normal3D mr = Normal3D(m_cosRt * m.x + m_sinRt * m.y,
-m_sinRt * m.x + m_cosRt * m.y,
m.z);
if (mr.z <= 0)
return 0.0f;
float temp = pow2(mr.x / m_alpha_gx) + pow2(mr.y / m_alpha_gy) + pow2(mr.z);
return 1.0f / (M_PIf * m_alpha_gx * m_alpha_gy * pow2(temp));
}
RT_FUNCTION float evaluateSmithG1(const Vector3D &v, const Normal3D &m) {
Vector3D vr = Vector3D(m_cosRt * v.x + m_sinRt * v.y,
-m_sinRt * v.x + m_cosRt * v.y,
v.z);
float alpha_g2_tanTheta2 = (pow2(vr.x * m_alpha_gx) + pow2(vr.y * m_alpha_gy)) / pow2(vr.z);
float Lambda = (-1 + std::sqrt(1 + alpha_g2_tanTheta2)) / 2;
float chi = (dot(v, m) / v.z) > 0 ? 1 : 0;
return chi / (1 + Lambda);
}
RT_FUNCTION float evaluateHeightCorrelatedSmithG(const Vector3D &v1, const Vector3D &v2, const Normal3D &m) {
Vector3D v1r = Vector3D(m_cosRt * v1.x + m_sinRt * v1.y,
-m_sinRt * v1.x + m_cosRt * v1.y,
v1.z);
Vector3D v2r = Vector3D(m_cosRt * v2.x + m_sinRt * v2.y,
-m_sinRt * v2.x + m_cosRt * v2.y,
v2.z);
float alpha_g2_tanTheta2_1 = (pow2(v1r.x * m_alpha_gx) + pow2(v1r.y * m_alpha_gy)) / pow2(v1r.z);
float alpha_g2_tanTheta2_2 = (pow2(v2r.x * m_alpha_gx) + pow2(v2r.y * m_alpha_gy)) / pow2(v2r.z);
float Lambda1 = (-1 + std::sqrt(1 + alpha_g2_tanTheta2_1)) / 2;
float Lambda2 = (-1 + std::sqrt(1 + alpha_g2_tanTheta2_2)) / 2;
float chi1 = (dot(v1, m) / v1.z) > 0 ? 1 : 0;
float chi2 = (dot(v2, m) / v2.z) > 0 ? 1 : 0;
return chi1 * chi2 / (1 + Lambda1 + Lambda2);
}
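// The sampler below appears to implement GGX visible-normal (VNDF) sampling in the stretch/unstretch
// style of Heitz: stretch the view vector by the alphas, sample a point on the projected hemisphere,
// then unstretch the resulting normal. It returns D(m) and writes the visible-normal PDF
// G1(v, m) * |dot(v, m)| * D(m) / |v.z| into *normalPDF.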
RT_FUNCTION float sample(const Vector3D &v, float u0, float u1, Normal3D* m, float* normalPDF) {
Vector3D vr = Vector3D(m_cosRt * v.x + m_sinRt * v.y,
-m_sinRt * v.x + m_cosRt * v.y,
v.z);
// stretch view
Vector3D sv = normalize(Vector3D(m_alpha_gx * vr.x, m_alpha_gy * vr.y, vr.z));
// orthonormal basis
// Vector3D T1 = (sv.z < 0.9999f) ? normalize(cross(sv, Vector3D::Ez)) : Vector3D::Ex;
// Vector3D T2 = cross(T1, sv);
float distIn2D = std::sqrt(sv.x * sv.x + sv.y * sv.y);
float recDistIn2D = 1.0f / distIn2D;
Vector3D T1 = (sv.z < 0.9999f) ? Vector3D(sv.y * recDistIn2D, -sv.x * recDistIn2D, 0) : Vector3D::Ex();
Vector3D T2 = Vector3D(T1.y * sv.z, -T1.x * sv.z, distIn2D);
// sample point with polar coordinates (r, phi)
float a = 1.0f / (1.0f + sv.z);
float r = std::sqrt(u0);
float phi = M_PIf * ((u1 < a) ? u1 / a : 1 + (u1 - a) / (1.0f - a));
float sinPhi, cosPhi;
VLR::sincos(phi, &sinPhi, &cosPhi);
float P1 = r * cosPhi;
float P2 = r * sinPhi * ((u1 < a) ? 1.0f : sv.z);
// compute normal
Normal3D mr = P1 * T1 + P2 * T2 + std::sqrt(1.0f - P1 * P1 - P2 * P2) * sv;
// unstretch
mr = normalize(Normal3D(m_alpha_gx * mr.x, m_alpha_gy * mr.y, mr.z));
float D = evaluate(mr);
*normalPDF = evaluateSmithG1(vr, mr) * absDot(vr, mr) * D / std::fabs(vr.z);
*m = Normal3D(m_cosRt * mr.x - m_sinRt * mr.y,
m_sinRt * mr.x + m_cosRt * mr.y,
mr.z);
return D;
}
RT_FUNCTION float evaluatePDF(const Vector3D &v, const Normal3D &m) {
return evaluateSmithG1(v, m) * absDot(v, m) * evaluate(m) / std::fabs(v.z);
}
};
// ----------------------------------------------------------------
// NullBSDF
RT_CALLABLE_PROGRAM uint32_t NullBSDF_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
return 0;
}
RT_CALLABLE_PROGRAM SampledSpectrum NullBSDF_getBaseColor(const uint32_t* params) {
return SampledSpectrum::Zero();
}
RT_CALLABLE_PROGRAM bool NullBSDF_matches(const uint32_t* params, DirectionType flags) {
return false;
}
RT_CALLABLE_PROGRAM SampledSpectrum NullBSDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
return SampledSpectrum::Zero();
}
RT_CALLABLE_PROGRAM SampledSpectrum NullBSDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
return SampledSpectrum::Zero();
}
RT_CALLABLE_PROGRAM float NullBSDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
return 0.0f;
}
RT_CALLABLE_PROGRAM float NullBSDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
return 0.0f;
}
// END: NullBSDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// MatteBRDF
struct MatteBRDF {
SampledSpectrum albedo;
float roughness;
};
RT_CALLABLE_PROGRAM uint32_t MatteSurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
MatteBRDF &p = *(MatteBRDF*)params;
auto &mat = *(const MatteSurfaceMaterial*)matDesc;
p.albedo = calcNode(mat.nodeAlbedo, mat.immAlbedo, surfPt, wls);
p.roughness = 0.0f;
return sizeof(MatteBRDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum MatteBRDF_getBaseColor(const uint32_t* params) {
auto &p = *(const MatteBRDF*)params;
return p.albedo;
}
RT_CALLABLE_PROGRAM bool MatteBRDF_matches(const uint32_t* params, DirectionType flags) {
DirectionType m_type = DirectionType::Reflection() | DirectionType::LowFreq();
return m_type.matches(flags);
}
RT_CALLABLE_PROGRAM SampledSpectrum MatteBRDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const MatteBRDF*)params;
result->dirLocal = cosineSampleHemisphere(uDir[0], uDir[1]);
result->dirPDF = result->dirLocal.z / M_PIf;
result->sampledType = DirectionType::Reflection() | DirectionType::LowFreq();
result->dirLocal.z *= query.dirLocal.z >= 0 ? 1 : -1;
return p.albedo / M_PIf;
}
RT_CALLABLE_PROGRAM SampledSpectrum MatteBRDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const MatteBRDF*)params;
if (query.dirLocal.z * dirLocal.z <= 0.0f) {
SampledSpectrum fs = SampledSpectrum::Zero();
return fs;
}
SampledSpectrum fs = p.albedo / M_PIf;
return fs;
}
RT_CALLABLE_PROGRAM float MatteBRDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
if (query.dirLocal.z * dirLocal.z <= 0.0f) {
return 0.0f;
}
return std::fabs(dirLocal.z) / M_PIf;
}
RT_CALLABLE_PROGRAM float MatteBRDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const MatteBRDF*)params;
return p.albedo.importance(query.wlHint);
}
// END: MatteBRDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// SpecularBRDF
struct SpecularBRDF {
SampledSpectrum coeffR;
SampledSpectrum eta;
SampledSpectrum k;
};
RT_CALLABLE_PROGRAM uint32_t SpecularReflectionSurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(SpecularBRDF*)params;
auto &mat = *(const SpecularReflectionSurfaceMaterial*)matDesc;
p.coeffR = calcNode(mat.nodeCoeffR, mat.immCoeffR, surfPt, wls);
p.eta = calcNode(mat.nodeEta, mat.immEta, surfPt, wls);
p.k = calcNode(mat.node_k, mat.imm_k, surfPt, wls);
return sizeof(SpecularBRDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum SpecularBRDF_getBaseColor(const uint32_t* params) {
auto &p = *(const SpecularBRDF*)params;
return p.coeffR;
}
RT_CALLABLE_PROGRAM bool SpecularBRDF_matches(const uint32_t* params, DirectionType flags) {
DirectionType m_type = DirectionType::Reflection() | DirectionType::Delta0D();
return m_type.matches(flags);
}
RT_CALLABLE_PROGRAM SampledSpectrum SpecularBRDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const SpecularBRDF*)params;
FresnelConductor fresnel(p.eta, p.k);
result->dirLocal = Vector3D(-query.dirLocal.x, -query.dirLocal.y, query.dirLocal.z);
result->dirPDF = 1.0f;
result->sampledType = DirectionType::Reflection() | DirectionType::Delta0D();
SampledSpectrum fs = p.coeffR * fresnel.evaluate(query.dirLocal.z) / std::fabs(query.dirLocal.z);
return fs;
}
RT_CALLABLE_PROGRAM SampledSpectrum SpecularBRDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
return SampledSpectrum::Zero();
}
RT_CALLABLE_PROGRAM float SpecularBRDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
return 0.0f;
}
RT_CALLABLE_PROGRAM float SpecularBRDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const SpecularBRDF*)params;
FresnelConductor fresnel(p.eta, p.k);
return (p.coeffR * fresnel.evaluate(query.dirLocal.z)).importance(query.wlHint);
}
// END: SpecularBRDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// SpecularBSDF
struct SpecularBSDF {
SampledSpectrum coeff;
SampledSpectrum etaExt;
SampledSpectrum etaInt;
bool dispersive;
};
RT_CALLABLE_PROGRAM uint32_t SpecularScatteringSurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(SpecularBSDF*)params;
auto &mat = *(const SpecularScatteringSurfaceMaterial*)matDesc;
p.coeff = calcNode(mat.nodeCoeff, mat.immCoeff, surfPt, wls);
p.etaExt = calcNode(mat.nodeEtaExt, mat.immEtaExt, surfPt, wls);
p.etaInt = calcNode(mat.nodeEtaInt, mat.immEtaInt, surfPt, wls);
p.dispersive = !wls.singleIsSelected();
return sizeof(SpecularBSDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum SpecularBSDF_getBaseColor(const uint32_t* params) {
auto &p = *(const SpecularBSDF*)params;
return p.coeff;
}
RT_CALLABLE_PROGRAM bool SpecularBSDF_matches(const uint32_t* params, DirectionType flags) {
DirectionType m_type = DirectionType::WholeSphere() | DirectionType::Delta0D();
return m_type.matches(flags);
}
RT_CALLABLE_PROGRAM SampledSpectrum SpecularBSDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const SpecularBSDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
const SampledSpectrum &eEnter = entering ? p.etaExt : p.etaInt;
const SampledSpectrum &eExit = entering ? p.etaInt : p.etaExt;
FresnelDielectric fresnel(eEnter, eExit);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
SampledSpectrum F = fresnel.evaluate(dirV.z);
float reflectProb = F.importance(query.wlHint);
if (query.dirTypeFilter.isReflection())
reflectProb = 1.0f;
if (query.dirTypeFilter.isTransmission())
reflectProb = 0.0f;
if (uComponent < reflectProb) {
if (dirV.z == 0.0f) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
Vector3D dirL = Vector3D(-dirV.x, -dirV.y, dirV.z);
result->dirLocal = entering ? dirL : -dirL;
result->dirPDF = reflectProb;
result->sampledType = DirectionType::Reflection() | DirectionType::Delta0D();
SampledSpectrum fs = p.coeff * F / std::fabs(dirV.z);
return fs;
}
else {
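// Refraction branch. Snell's law gives sin^2(thetaExit) = (etaEnter/etaExit)^2 * sin^2(thetaEnter);
// a value >= 1 means total internal reflection and the transmission sample is discarded.
// Only the hero wavelength (wlHint) is refracted, since the IOR is spectral (dispersion).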
float sinEnter2 = 1.0f - dirV.z * dirV.z;
float recRelIOR = eEnter[query.wlHint] / eExit[query.wlHint];// reciprocal of relative IOR.
float sinExit2 = recRelIOR * recRelIOR * sinEnter2;
if (sinExit2 >= 1.0f) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
float cosExit = std::sqrt(std::fmax(0.0f, 1.0f - sinExit2));
Vector3D dirL = Vector3D(recRelIOR * -dirV.x, recRelIOR * -dirV.y, -cosExit);
result->dirLocal = entering ? dirL : -dirL;
result->dirPDF = 1.0f - reflectProb;
result->sampledType = DirectionType::Transmission() | DirectionType::Delta0D() | (p.dispersive ? DirectionType::Dispersive() : DirectionType());
SampledSpectrum ret = SampledSpectrum::Zero();
ret[query.wlHint] = p.coeff[query.wlHint] * (1.0f - F[query.wlHint]);
SampledSpectrum fs = ret / std::fabs(cosExit);
return fs;
}
}
RT_CALLABLE_PROGRAM SampledSpectrum SpecularBSDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
return SampledSpectrum::Zero();
}
RT_CALLABLE_PROGRAM float SpecularBSDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
return 0.0f;
}
RT_CALLABLE_PROGRAM float SpecularBSDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const SpecularBSDF*)params;
return p.coeff.importance(query.wlHint);
}
// END: SpecularBSDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// MicrofacetBRDF
struct MicrofacetBRDF {
SampledSpectrum eta;
SampledSpectrum k;
float alphaX;
float alphaY;
float rotation;
};
RT_CALLABLE_PROGRAM uint32_t MicrofacetReflectionSurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(MicrofacetBRDF*)params;
auto &mat = *(const MicrofacetReflectionSurfaceMaterial*)matDesc;
p.eta = calcNode(mat.nodeEta, mat.immEta, surfPt, wls);
p.k = calcNode(mat.node_k, mat.imm_k, surfPt, wls);
optix::float3 roughnessAnisotropyRotation = calcNode(mat.nodeRoughnessAnisotropyRotation,
optix::make_float3(mat.immRoughness, mat.immAnisotropy, mat.immRotation),
surfPt, wls);
float alpha = pow2(roughnessAnisotropyRotation.x);
float aspect = std::sqrt(1.0f - 0.9f * roughnessAnisotropyRotation.y);
p.alphaX = std::fmax(0.001f, alpha / aspect);
p.alphaY = std::fmax(0.001f, alpha * aspect);
p.rotation = 2 * M_PIf * roughnessAnisotropyRotation.z;
return sizeof(MicrofacetBRDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum MicrofacetBRDF_getBaseColor(const uint32_t* params) {
auto &p = *(const MicrofacetBRDF*)params;
FresnelConductor fresnel(p.eta, p.k);
return fresnel.evaluate(1.0f);
}
RT_CALLABLE_PROGRAM bool MicrofacetBRDF_matches(const uint32_t* params, DirectionType flags) {
DirectionType m_type = DirectionType::Reflection() | DirectionType::HighFreq();
return m_type.matches(flags);
}
RT_CALLABLE_PROGRAM SampledSpectrum MicrofacetBRDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const MicrofacetBRDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
FresnelConductor fresnel(p.eta, p.k);
GGXMicrofacetDistribution ggx(p.alphaX, p.alphaY, p.rotation);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
// JP: ハーフベクトルをサンプルして、最終的な方向サンプルを生成する。
// EN: sample a half vector, then generate a resulting direction sample based on it.
Normal3D m;
float mPDF;
float D = ggx.sample(dirV, uDir[0], uDir[1], &m, &mPDF);
float dotHV = dot(dirV, m);
if (dotHV <= 0) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
Vector3D dirL = 2 * dotHV * m - dirV;
result->dirLocal = entering ? dirL : -dirL;
if (dirL.z * dirV.z <= 0) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
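// Change of variables from the sampled half vector m to the reflected direction:
// p(dirL) = p(m) / (4 * dot(dirV, m)), the Jacobian of the reflection mapping.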
float commonPDFTerm = 1.0f / (4 * dotHV);
result->dirPDF = commonPDFTerm * mPDF;
result->sampledType = DirectionType::Reflection() | DirectionType::HighFreq();
SampledSpectrum F = fresnel.evaluate(dotHV);
float G = ggx.evaluateSmithG1(dirV, m) * ggx.evaluateSmithG1(dirL, m);
SampledSpectrum fs = F * D * G / (4 * dirV.z * dirL.z);
//VLRAssert(fs.allFinite(), "fs: %s, F: %s, G, %g, D: %g, wlIdx: %u, qDir: %s, rDir: %s",
// fs.toString().c_str(), F.toString().c_str(), G, D, query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
return fs;
}
RT_CALLABLE_PROGRAM SampledSpectrum MicrofacetBRDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const MicrofacetBRDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
FresnelConductor fresnel(p.eta, p.k);
GGXMicrofacetDistribution ggx(p.alphaX, p.alphaY, p.rotation);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = entering ? dirLocal : -dirLocal;
float dotNVdotNL = dirL.z * dirV.z;
if (dotNVdotNL <= 0)
return SampledSpectrum::Zero();
Normal3D m = halfVector(dirV, dirL);
float dotHV = dot(dirV, m);
float D = ggx.evaluate(m);
SampledSpectrum F = fresnel.evaluate(dotHV);
float G = ggx.evaluateSmithG1(dirV, m) * ggx.evaluateSmithG1(dirL, m);
SampledSpectrum fs = F * D * G / (4 * dotNVdotNL);
//VLRAssert(fs.allFinite(), "fs: %s, F: %s, G, %g, D: %g, wlIdx: %u, qDir: %s, dir: %s",
// fs.toString().c_str(), F.toString().c_str(), G, D, query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
return fs;
}
RT_CALLABLE_PROGRAM float MicrofacetBRDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const MicrofacetBRDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
FresnelConductor fresnel(p.eta, p.k);
GGXMicrofacetDistribution ggx(p.alphaX, p.alphaY, p.rotation);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = entering ? dirLocal : -dirLocal;
float dotNVdotNL = dirL.z * dirV.z;
if (dotNVdotNL <= 0.0f)
return 0.0f;
Normal3D m = halfVector(dirV, dirL);
float dotHV = dot(dirV, m);
if (dotHV <= 0)
return 0.0f;
float mPDF = ggx.evaluatePDF(dirV, m);
float commonPDFTerm = 1.0f / (4 * dotHV);
float ret = commonPDFTerm * mPDF;
//VLRAssert(std::isfinite(commonPDFTerm) && std::isfinite(mPDF),
// "commonPDFTerm: %g, mPDF: %g, wlIdx: %u, qDir: %s, dir: %s",
// commonPDFTerm, mPDF, query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
return ret;
}
RT_CALLABLE_PROGRAM float MicrofacetBRDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const MicrofacetBRDF*)params;
FresnelConductor fresnel(p.eta, p.k);
float expectedDotHV = query.dirLocal.z;
return fresnel.evaluate(expectedDotHV).importance(query.wlHint);
}
// END: MicrofacetBRDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// MicrofacetBSDF
struct MicrofacetBSDF {
SampledSpectrum coeff;
SampledSpectrum etaExt;
SampledSpectrum etaInt;
float alphaX;
float alphaY;
float rotation;
};
RT_CALLABLE_PROGRAM uint32_t MicrofacetScatteringSurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(MicrofacetBSDF*)params;
auto &mat = *(const MicrofacetScatteringSurfaceMaterial*)matDesc;
p.coeff = calcNode(mat.nodeCoeff, mat.immCoeff, surfPt, wls);
p.etaExt = calcNode(mat.nodeEtaExt, mat.immEtaExt, surfPt, wls);
p.etaInt = calcNode(mat.nodeEtaInt, mat.immEtaInt, surfPt, wls);
optix::float3 roughnessAnisotropyRotation = calcNode(mat.nodeRoughnessAnisotropyRotation,
optix::make_float3(mat.immRoughness, mat.immAnisotropy, mat.immRotation),
surfPt, wls);
float alpha = pow2(roughnessAnisotropyRotation.x);
float aspect = std::sqrt(1 - 0.9f * roughnessAnisotropyRotation.y);
p.alphaX = std::fmax(0.001f, alpha / aspect);
p.alphaY = std::fmax(0.001f, alpha * aspect);
p.rotation = 2 * M_PIf * roughnessAnisotropyRotation.z;
return sizeof(MicrofacetBSDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum MicrofacetBSDF_getBaseColor(const uint32_t* params) {
auto &p = *(const MicrofacetBSDF*)params;
return p.coeff;
}
RT_CALLABLE_PROGRAM bool MicrofacetBSDF_matches(const uint32_t* params, DirectionType flags) {
DirectionType m_type = DirectionType::WholeSphere() | DirectionType::HighFreq();
return m_type.matches(flags);
}
RT_CALLABLE_PROGRAM SampledSpectrum MicrofacetBSDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const MicrofacetBSDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
const SampledSpectrum &eEnter = entering ? p.etaExt : p.etaInt;
const SampledSpectrum &eExit = entering ? p.etaInt : p.etaExt;
FresnelDielectric fresnel(eEnter, eExit);
GGXMicrofacetDistribution ggx(p.alphaX, p.alphaY, p.rotation);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
// JP: ハーフベクトルをサンプルする。
// EN: sample a half vector.
Normal3D m;
float mPDF;
float D = ggx.sample(dirV, uDir[0], uDir[1], &m, &mPDF);
float dotHV = dot(dirV, m);
if (dotHV <= 0 || std::isnan(D)) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
// JP: サンプルしたハーフベクトルからフレネル項の値を計算して、反射か透過を選択する。
// EN: calculate the Fresnel term using the sampled half vector, then select reflection or transmission.
SampledSpectrum F = fresnel.evaluate(dotHV);
float reflectProb = F.importance(query.wlHint);
if (query.dirTypeFilter.isReflection())
reflectProb = 1.0f;
if (query.dirTypeFilter.isTransmission())
reflectProb = 0.0f;
if (uComponent < reflectProb) {
// JP: 最終的な方向サンプルを生成する。
// EN: calculate a resulting direction.
Vector3D dirL = 2 * dotHV * m - dirV;
result->dirLocal = entering ? dirL : -dirL;
if (dirL.z * dirV.z <= 0) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
float commonPDFTerm = reflectProb / (4 * dotHV);
result->dirPDF = commonPDFTerm * mPDF;
result->sampledType = DirectionType::Reflection() | DirectionType::HighFreq();
float G = ggx.evaluateSmithG1(dirV, m) * ggx.evaluateSmithG1(dirL, m);
SampledSpectrum fs = F * D * G / (4 * dirV.z * dirL.z);
//VLRAssert(fs.allFinite(), "fs: %s, F: %g, %g, %g, G, %g, D: %g, wlIdx: %u, qDir: (%g, %g, %g), rDir: (%g, %g, %g)",
// fs.toString().c_str(), F.toString().c_str(), G, D, query.wlHint,
// dirV.x, dirV.y, dirV.z, dirL.x, dirL.y, dirL.z);
return fs;
}
else {
// JP: 最終的な方向サンプルを生成する。
// EN: calculate a resulting direction.
float recRelIOR = eEnter[query.wlHint] / eExit[query.wlHint];
float innerRoot = 1 + recRelIOR * recRelIOR * (dotHV * dotHV - 1);
if (innerRoot < 0) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
Vector3D dirL = (recRelIOR * dotHV - std::sqrt(innerRoot)) * m - recRelIOR * dirV;
result->dirLocal = entering ? dirL : -dirL;
if (dirL.z * dirV.z >= 0) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
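// Half-vector-to-direction Jacobian for refraction (Walter et al. 2007):
// p(dirL) = p(m) * etaExit^2 * |dot(dirL, m)| / (etaEnter * dot(dirV, m) + etaExit * dot(dirL, m))^2,
// which the following lines compute together with the transmission selection probability.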
float dotHL = dot(dirL, m);
float commonPDFTerm = (1 - reflectProb) / std::pow(eEnter[query.wlHint] * dotHV + eExit[query.wlHint] * dotHL, 2);
result->dirPDF = commonPDFTerm * mPDF * eExit[query.wlHint] * eExit[query.wlHint] * std::fabs(dotHL);
result->sampledType = DirectionType::Transmission() | DirectionType::HighFreq();
// JP: マイクロファセットBSDFの各項の値を波長成分ごとに計算する。
// EN: calculate the value of each term of the microfacet BSDF for each wavelength component.
SampledSpectrum ret = SampledSpectrum::Zero();
for (int wlIdx = 0; wlIdx < SampledSpectrum::NumComponents(); ++wlIdx) {
Normal3D m_wl = normalize(-(eEnter[wlIdx] * dirV + eExit[wlIdx] * dirL) * (entering ? 1 : -1));
float dotHV_wl = dot(dirV, m_wl);
float dotHL_wl = dot(dirL, m_wl);
float F_wl = fresnel.evaluate(dotHV_wl, wlIdx);
float G_wl = ggx.evaluateSmithG1(dirV, m_wl) * ggx.evaluateSmithG1(dirL, m_wl);
float D_wl = ggx.evaluate(m_wl);
ret[wlIdx] = std::fabs(dotHV_wl * dotHL_wl) * (1 - F_wl) * G_wl * D_wl / std::pow(eEnter[wlIdx] * dotHV_wl + eExit[wlIdx] * dotHL_wl, 2);
//VLRAssert(std::isfinite(ret[wlIdx]), "fs: %g, F: %g, G, %g, D: %g, wlIdx: %u, qDir: %s",
// ret[wlIdx], F_wl, G_wl, D_wl, query.wlHint, dirV.toString().c_str());
}
ret /= std::fabs(dirV.z * dirL.z);
ret *= eEnter * eEnter;
//ret *= query.adjoint ? (eExit * eExit) : (eEnter * eEnter);// adjoint: need to cancel eEnter^2 / eExit^2 => eEnter^2 * (eExit^2 / eEnter^2)
//VLRAssert(ret.allFinite(), "fs: %s, wlIdx: %u, qDir: %s, rDir: %s",
// ret.toString().c_str(), query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
return ret;
}
}
RT_CALLABLE_PROGRAM SampledSpectrum MicrofacetBSDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const MicrofacetBSDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
const SampledSpectrum &eEnter = entering ? p.etaExt : p.etaInt;
const SampledSpectrum &eExit = entering ? p.etaInt : p.etaExt;
FresnelDielectric fresnel(eEnter, eExit);
GGXMicrofacetDistribution ggx(p.alphaX, p.alphaY, p.rotation);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = entering ? dirLocal : -dirLocal;
float dotNVdotNL = dirL.z * dirV.z;
if (dotNVdotNL > 0 && query.dirTypeFilter.matches(DirectionType::Reflection() | DirectionType::AllFreq())) {
Normal3D m = halfVector(dirV, dirL);
float dotHV = dot(dirV, m);
float D = ggx.evaluate(m);
SampledSpectrum F = fresnel.evaluate(dotHV);
float G = ggx.evaluateSmithG1(dirV, m) * ggx.evaluateSmithG1(dirL, m);
SampledSpectrum fs = F * D * G / (4 * dotNVdotNL);
//VLRAssert(fs.allFinite(), "fs: %s, F: %s, G, %g, D: %g, wlIdx: %u, qDir: %s, dir: %s",
// fs.toString().c_str(), F.toString().c_str(), G, D, query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
return fs;
}
else if (dotNVdotNL < 0 && query.dirTypeFilter.matches(DirectionType::Transmission() | DirectionType::AllFreq())) {
SampledSpectrum ret = SampledSpectrum::Zero();
for (int wlIdx = 0; wlIdx < SampledSpectrum::NumComponents(); ++wlIdx) {
Normal3D m_wl = normalize(-(eEnter[wlIdx] * dirV + eExit[wlIdx] * dirL) * (entering ? 1 : -1));
float dotHV_wl = dot(dirV, m_wl);
float dotHL_wl = dot(dirL, m_wl);
float F_wl = fresnel.evaluate(dotHV_wl, wlIdx);
float G_wl = ggx.evaluateSmithG1(dirV, m_wl) * ggx.evaluateSmithG1(dirL, m_wl);
float D_wl = ggx.evaluate(m_wl);
ret[wlIdx] = std::fabs(dotHV_wl * dotHL_wl) * (1 - F_wl) * G_wl * D_wl / std::pow(eEnter[wlIdx] * dotHV_wl + eExit[wlIdx] * dotHL_wl, 2);
//VLRAssert(std::isfinite(ret[wlIdx]), "fs: %g, F: %g, G, %g, D: %g, wlIdx: %u, qDir: %s, dir: %s",
// ret[wlIdx], F_wl, G_wl, D_wl, query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
}
ret /= std::fabs(dotNVdotNL);
ret *= eEnter * eEnter;
//ret *= query.adjoint ? (eExit * eExit) : (eEnter * eEnter);// !adjoint: eExit^2 * (eEnter / eExit)^2
//VLRAssert(ret.allFinite(), "fs: %s, wlIdx: %u, qDir: %s, dir: %s",
// ret.toString().c_str(), query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
return ret;
}
return SampledSpectrum::Zero();
}
RT_CALLABLE_PROGRAM float MicrofacetBSDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const MicrofacetBSDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
const SampledSpectrum &eEnter = entering ? p.etaExt : p.etaInt;
const SampledSpectrum &eExit = entering ? p.etaInt : p.etaExt;
FresnelDielectric fresnel(eEnter, eExit);
GGXMicrofacetDistribution ggx(p.alphaX, p.alphaY, p.rotation);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = entering ? dirLocal : -dirLocal;
float dotNVdotNL = dirL.z * dirV.z;
if (dotNVdotNL == 0)
return 0.0f;
Normal3D m;
if (dotNVdotNL > 0)
m = halfVector(dirV, dirL);
else
m = normalize(-(eEnter[query.wlHint] * dirV + eExit[query.wlHint] * dirL));
float dotHV = dot(dirV, m);
if (dotHV <= 0)
return 0.0f;
float mPDF = ggx.evaluatePDF(dirV, m);
SampledSpectrum F = fresnel.evaluate(dotHV);
float reflectProb = F.importance(query.wlHint);
if (query.dirTypeFilter.isReflection())
reflectProb = 1.0f;
if (query.dirTypeFilter.isTransmission())
reflectProb = 0.0f;
if (dotNVdotNL > 0) {
float commonPDFTerm = reflectProb / (4 * dotHV);
//VLRAssert(std::isfinite(commonPDFTerm) && std::isfinite(mPDF),
// "commonPDFTerm: %g, mPDF: %g, F: %s, wlIdx: %u, qDir: %s, dir: %s",
// commonPDFTerm, mPDF, F.toString().c_str(), query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
return commonPDFTerm * mPDF;
}
else {
float dotHL = dot(dirL, m);
float commonPDFTerm = (1 - reflectProb) / std::pow(eEnter[query.wlHint] * dotHV + eExit[query.wlHint] * dotHL, 2);
//VLRAssert(std::isfinite(commonPDFTerm) && std::isfinite(mPDF),
// "commonPDFTerm: %g, mPDF: %g, F: %s, wlIdx: %u, qDir: %s, dir: %s",
// commonPDFTerm, mPDF, F.toString().c_str(), query.wlHint, dirV.toString().c_str(), dirL.toString().c_str());
return commonPDFTerm * mPDF * eExit[query.wlHint] * eExit[query.wlHint] * std::fabs(dotHL);
}
}
RT_CALLABLE_PROGRAM float MicrofacetBSDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const MicrofacetBSDF*)params;
return p.coeff.importance(query.wlHint);
}
// END: MicrofacetBSDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// LambertianBSDF
struct LambertianBSDF {
SampledSpectrum coeff;
float F0;
};
RT_CALLABLE_PROGRAM uint32_t LambertianScatteringSurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(LambertianBSDF*)params;
auto &mat = *(const LambertianScatteringSurfaceMaterial*)matDesc;
p.coeff = calcNode(mat.nodeCoeff, mat.immCoeff, surfPt, wls);
p.F0 = calcNode(mat.nodeF0, mat.immF0, surfPt, wls);
return sizeof(LambertianBSDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum LambertianBSDF_getBaseColor(const uint32_t* params) {
auto &p = *(const LambertianBSDF*)params;
return p.coeff;
}
RT_CALLABLE_PROGRAM bool LambertianBSDF_matches(const uint32_t* params, DirectionType flags) {
DirectionType m_type = DirectionType::WholeSphere() | DirectionType::LowFreq();
return m_type.matches(flags);
}
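// The routines below model a thin translucent diffuse surface: a Schlick Fresnel term splits the
// energy between a cosine-weighted reflection lobe and a cosine-weighted transmission lobe on the
// opposite side of the surface.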
RT_CALLABLE_PROGRAM SampledSpectrum LambertianBSDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const LambertianBSDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
FresnelSchlick fresnel(p.F0);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = cosineSampleHemisphere(uDir[0], uDir[1]);
result->dirPDF = dirL.z / M_PIf;
SampledSpectrum F = fresnel.evaluate(query.dirLocal.z);
float reflectProb = F.importance(query.wlHint);
if (query.dirTypeFilter.isReflection())
reflectProb = 1.0f;
if (query.dirTypeFilter.isTransmission())
reflectProb = 0.0f;
if (uComponent < reflectProb) {
result->dirLocal = entering ? dirL : -dirL;
result->sampledType = DirectionType::Reflection() | DirectionType::LowFreq();
SampledSpectrum fs = F * p.coeff / M_PIf;
result->dirPDF *= reflectProb;
return fs;
}
else {
result->dirLocal = entering ? -dirL : dirL;
result->sampledType = DirectionType::Transmission() | DirectionType::LowFreq();
SampledSpectrum fs = (SampledSpectrum::One() - F) * p.coeff / M_PIf;
result->dirPDF *= (1 - reflectProb);
return fs;
}
}
RT_CALLABLE_PROGRAM SampledSpectrum LambertianBSDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const LambertianBSDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
FresnelSchlick fresnel(p.F0);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = entering ? dirLocal : -dirLocal;
SampledSpectrum F = fresnel.evaluate(query.dirLocal.z);
if (dirV.z * dirL.z > 0.0f) {
SampledSpectrum fs = F * p.coeff / M_PIf;
return fs;
}
else {
SampledSpectrum fs = (SampledSpectrum::One() - F) * p.coeff / M_PIf;
return fs;
}
}
RT_CALLABLE_PROGRAM float LambertianBSDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const LambertianBSDF*)params;
bool entering = query.dirLocal.z >= 0.0f;
FresnelSchlick fresnel(p.F0);
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = entering ? dirLocal : -dirLocal;
SampledSpectrum F = fresnel.evaluate(query.dirLocal.z);
float reflectProb = F.importance(query.wlHint);
if (query.dirTypeFilter.isReflection())
reflectProb = 1.0f;
if (query.dirTypeFilter.isTransmission())
reflectProb = 0.0f;
if (dirV.z * dirL.z > 0.0f) {
float dirPDF = reflectProb * dirL.z / M_PIf;
return dirPDF;
}
else {
float dirPDF = (1 - reflectProb) * std::fabs(dirL.z) / M_PIf;
return dirPDF;
}
}
RT_CALLABLE_PROGRAM float LambertianBSDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const LambertianBSDF*)params;
return p.coeff.importance(query.wlHint);
}
// END: LambertianBSDF
// ----------------------------------------------------------------
#define USE_HEIGHT_CORRELATED_SMITH
RT_FUNCTION SampledSpectrum diffuseAndSpecularBRDF_sampleInternal(const SampledSpectrum &diffuseColor, const SampledSpectrum &specularF0Color, float roughness,
const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
float alpha = roughness * roughness;
GGXMicrofacetDistribution ggx(alpha, alpha, 0.0f);
bool entering = query.dirLocal.z >= 0.0f;
Vector3D dirL;
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
float expectedF_D90 = 0.5f * roughness + 2 * roughness * query.dirLocal.z * query.dirLocal.z;
float oneMinusDotVN5 = std::pow(1 - dirV.z, 5);
float expectedDiffuseFresnel = lerp(1.0f, expectedF_D90, oneMinusDotVN5);
float iBaseColor = diffuseColor.importance(query.wlHint) * expectedDiffuseFresnel * expectedDiffuseFresnel * lerp(1.0f, 1.0f / 1.51f, roughness);
float expectedOneMinusDotVH5 = std::pow(1 - dirV.z, 5);
float iSpecularF0 = specularF0Color.importance(query.wlHint);
float diffuseWeight = iBaseColor;
float specularWeight = lerp(iSpecularF0, 1.0f, expectedOneMinusDotVH5);
float weights[] = { diffuseWeight, specularWeight };
float probSelection;
float sumWeights = 0.0f;
uint32_t component = sampleDiscrete(weights, 2, uComponent, &probSelection, &sumWeights, &uComponent);
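// One lobe (0: diffuse, 1: specular) is chosen in proportion to its estimated weight; the final
// PDF below mixes both lobes' PDFs with those weights (single-sample MIS, balance heuristic):
// pdf = (w_d * p_d + w_s * p_s) / (w_d + w_s).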
float diffuseDirPDF, specularDirPDF;
SampledSpectrum fs;
Normal3D m;
float dotLH;
float D;
if (component == 0) {
result->sampledType = DirectionType::Reflection() | DirectionType::LowFreq();
// JP: コサイン分布からサンプルする。
// EN: sample based on the cosine distribution.
dirL = cosineSampleHemisphere(uDir[0], uDir[1]);
diffuseDirPDF = dirL.z / M_PIf;
// JP: 同じ方向サンプルを別の要素からサンプルする確率密度を求める。
// EN: calculate PDFs to generate the sampled direction from the other distributions.
m = halfVector(dirL, dirV);
dotLH = dot(dirL, m);
float commonPDFTerm = 1.0f / (4 * dotLH);
specularDirPDF = commonPDFTerm * ggx.evaluatePDF(dirV, m);
D = ggx.evaluate(m);
}
else if (component == 1) {
result->sampledType = DirectionType::Reflection() | DirectionType::HighFreq();
// ----------------------------------------------------------------
// JP: ベーススペキュラー層のマイクロファセット分布からサンプルする。
// EN: sample based on the base specular microfacet distribution.
float mPDF;
D = ggx.sample(dirV, uDir[0], uDir[1], &m, &mPDF);
float dotVH = dot(dirV, m);
dotLH = dotVH;
dirL = 2 * dotVH * m - dirV;
if (dirL.z * dirV.z <= 0) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
float commonPDFTerm = 1.0f / (4 * dotLH);
specularDirPDF = commonPDFTerm * mPDF;
// ----------------------------------------------------------------
// JP: 同じ方向サンプルを別の要素からサンプルする確率密度を求める。
// EN: calculate PDFs to generate the sampled direction from the other distributions.
diffuseDirPDF = dirL.z / M_PIf;
}
float oneMinusDotLH5 = std::pow(1 - dotLH, 5);
#if defined(USE_HEIGHT_CORRELATED_SMITH)
float G = ggx.evaluateHeightCorrelatedSmithG(dirL, dirV, m);
#else
float G = ggx.evaluateSmithG1(dirL, m) * ggx.evaluateSmithG1(dirV, m);
#endif
SampledSpectrum F = lerp(specularF0Color, SampledSpectrum::One(), oneMinusDotLH5);
float microfacetDenom = 4 * dirL.z * dirV.z;
SampledSpectrum specularValue = F * ((D * G) / microfacetDenom);
float F_D90 = 0.5f * roughness + 2 * roughness * dotLH * dotLH;
float oneMinusDotLN5 = std::pow(1 - dirL.z, 5);
float diffuseFresnelOut = lerp(1.0f, F_D90, oneMinusDotVN5);
float diffuseFresnelIn = lerp(1.0f, F_D90, oneMinusDotLN5);
SampledSpectrum diffuseValue = diffuseColor * (diffuseFresnelOut * diffuseFresnelIn * lerp(1.0f, 1.0f / 1.51f, roughness) / M_PIf);
SampledSpectrum ret = diffuseValue + specularValue;
result->dirLocal = entering ? dirL : -dirL;
// PDF based on the single-sample model MIS.
result->dirPDF = (diffuseDirPDF * diffuseWeight + specularDirPDF * specularWeight) / sumWeights;
return ret;
}
RT_FUNCTION SampledSpectrum diffuseAndSpecularBRDF_evaluateInternal(const SampledSpectrum &diffuseColor, const SampledSpectrum &specularF0Color, float roughness,
const BSDFQuery &query, const Vector3D &dirLocal) {
float alpha = roughness * roughness;
GGXMicrofacetDistribution ggx(alpha, alpha, 0.0f);
if (dirLocal.z * query.dirLocal.z <= 0) {
return SampledSpectrum::Zero();
}
bool entering = query.dirLocal.z >= 0.0f;
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = entering ? dirLocal : -dirLocal;
Normal3D m = halfVector(dirL, dirV);
float dotLH = dot(dirL, m);
float oneMinusDotLH5 = std::pow(1 - dotLH, 5);
float D = ggx.evaluate(m);
#if defined(USE_HEIGHT_CORRELATED_SMITH)
float G = ggx.evaluateHeightCorrelatedSmithG(dirL, dirV, m);
#else
float G = ggx.evaluateSmithG1(dirL, m) * ggx.evaluateSmithG1(dirV, m);
#endif
SampledSpectrum F = lerp(specularF0Color, SampledSpectrum::One(), oneMinusDotLH5);
float microfacetDenom = 4 * dirL.z * dirV.z;
SampledSpectrum specularValue = F * ((D * G) / microfacetDenom);
float F_D90 = 0.5f * roughness + 2 * roughness * dotLH * dotLH;
float oneMinusDotVN5 = std::pow(1 - dirV.z, 5);
float oneMinusDotLN5 = std::pow(1 - dirL.z, 5);
float diffuseFresnelOut = lerp(1.0f, F_D90, oneMinusDotVN5);
float diffuseFresnelIn = lerp(1.0f, F_D90, oneMinusDotLN5);
SampledSpectrum diffuseValue = diffuseColor * (diffuseFresnelOut * diffuseFresnelIn * lerp(1.0f, 1.0f / 1.51f, roughness) / M_PIf);
SampledSpectrum ret = diffuseValue + specularValue;
return ret;
}
RT_FUNCTION float diffuseAndSpecularBRDF_evaluatePDFInternal(const SampledSpectrum &diffuseColor, const SampledSpectrum &specularF0Color, float roughness,
const BSDFQuery &query, const Vector3D &dirLocal) {
float alpha = roughness * roughness;
GGXMicrofacetDistribution ggx(alpha, alpha, 0.0f);
bool entering = query.dirLocal.z >= 0.0f;
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
Vector3D dirL = entering ? dirLocal : -dirLocal;
Normal3D m = halfVector(dirL, dirV);
float dotLH = dot(dirL, m);
float commonPDFTerm = 1.0f / (4 * dotLH);
float expectedF_D90 = 0.5f * roughness + 2 * roughness * query.dirLocal.z * query.dirLocal.z;
float oneMinusDotVN5 = std::pow(1 - dirV.z, 5);
float expectedDiffuseFresnel = lerp(1.0f, expectedF_D90, oneMinusDotVN5);
float iBaseColor = diffuseColor.importance(query.wlHint) * expectedDiffuseFresnel * expectedDiffuseFresnel * lerp(1.0f, 1.0f / 1.51f, roughness);
float expectedOneMinusDotVH5 = std::pow(1 - dirV.z, 5);
float iSpecularF0 = specularF0Color.importance(query.wlHint);
float diffuseWeight = iBaseColor;
float specularWeight = lerp(iSpecularF0, 1.0f, expectedOneMinusDotVH5);
float sumWeights = diffuseWeight + specularWeight;
float diffuseDirPDF = dirL.z / M_PIf;
float specularDirPDF = commonPDFTerm * ggx.evaluatePDF(dirV, m);
float ret = (diffuseDirPDF * diffuseWeight + specularDirPDF * specularWeight) / sumWeights;
return ret;
}
RT_FUNCTION float diffuseAndSpecularBRDF_weightInternal(const SampledSpectrum &diffuseColor, const SampledSpectrum &specularF0Color, float roughness,
const BSDFQuery &query) {
bool entering = query.dirLocal.z >= 0.0f;
Vector3D dirV = entering ? query.dirLocal : -query.dirLocal;
float expectedF_D90 = 0.5f * roughness + 2 * roughness * query.dirLocal.z * query.dirLocal.z;
float oneMinusDotVN5 = std::pow(1 - dirV.z, 5);
float expectedDiffuseFresnel = lerp(1.0f, expectedF_D90, oneMinusDotVN5);
float iBaseColor = diffuseColor.importance(query.wlHint) * expectedDiffuseFresnel * expectedDiffuseFresnel * lerp(1.0f, 1.0f / 1.51f, roughness);
float expectedOneMinusDotVH5 = std::pow(1 - dirV.z, 5);
float iSpecularF0 = specularF0Color.importance(query.wlHint);
float diffuseWeight = iBaseColor;
float specularWeight = lerp(iSpecularF0, 1.0f, expectedOneMinusDotVH5);
return diffuseWeight + specularWeight;
}
// ----------------------------------------------------------------
// UE4 (Modified) BRDF
struct UE4BRDF {
SampledSpectrum baseColor;
float roughness;
float metallic;
};
RT_CALLABLE_PROGRAM uint32_t UE4SurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(UE4BRDF*)params;
auto &mat = *(const UE4SurfaceMaterial*)matDesc;
p.baseColor = calcNode(mat.nodeBaseColor, mat.immBaseColor, surfPt, wls);
optix::float3 occlusionRoughnessMetallic = calcNode(mat.nodeOcclusionRoughnessMetallic,
optix::make_float3(mat.immOcclusion, mat.immRoughness, mat.immMetallic),
surfPt, wls);
p.roughness = std::fmax(0.01f, occlusionRoughnessMetallic.y);
p.metallic = occlusionRoughnessMetallic.z;
return sizeof(UE4BRDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum UE4BRDF_getBaseColor(const uint32_t* params) {
auto &p = *(const UE4BRDF*)params;
return p.baseColor;
}
RT_CALLABLE_PROGRAM bool UE4BRDF_matches(const uint32_t* params, DirectionType flags) {
DirectionType m_type = DirectionType::Reflection() | DirectionType::LowFreq() | DirectionType::HighFreq();
return m_type.matches(flags);
}
RT_CALLABLE_PROGRAM SampledSpectrum UE4BRDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const UE4BRDF*)params;
const float specular = 0.5f;
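// Metallic workflow: dielectrics use a constant F0 of 0.08 * specular (0.04 here), while metals
// reuse the base color as F0; the diffuse lobe fades out as metallic approaches 1.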
SampledSpectrum diffuseColor = p.baseColor * (1 - p.metallic);
SampledSpectrum specularF0Color = lerp(0.08f * specular * SampledSpectrum::One(), p.baseColor, p.metallic);
return diffuseAndSpecularBRDF_sampleInternal(diffuseColor, specularF0Color, p.roughness,
query, uComponent, uDir, result);
}
RT_CALLABLE_PROGRAM SampledSpectrum UE4BRDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const UE4BRDF*)params;
const float specular = 0.5f;
SampledSpectrum diffuseColor = p.baseColor * (1 - p.metallic);
SampledSpectrum specularF0Color = lerp(0.08f * specular * SampledSpectrum::One(), p.baseColor, p.metallic);
return diffuseAndSpecularBRDF_evaluateInternal(diffuseColor, specularF0Color, p.roughness,
query, dirLocal);
}
RT_CALLABLE_PROGRAM float UE4BRDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const UE4BRDF*)params;
const float specular = 0.5f;
SampledSpectrum diffuseColor = p.baseColor * (1 - p.metallic);
SampledSpectrum specularF0Color = lerp(0.08f * specular * SampledSpectrum::One(), p.baseColor, p.metallic);
return diffuseAndSpecularBRDF_evaluatePDFInternal(diffuseColor, specularF0Color, p.roughness,
query, dirLocal);
}
RT_CALLABLE_PROGRAM float UE4BRDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const UE4BRDF*)params;
const float specular = 0.5f;
SampledSpectrum diffuseColor = p.baseColor * (1 - p.metallic);
SampledSpectrum specularF0Color = lerp(0.08f * specular * SampledSpectrum::One(), p.baseColor, p.metallic);
return diffuseAndSpecularBRDF_weightInternal(diffuseColor, specularF0Color, p.roughness,
query);
}
// END: UE4 (Modified) BRDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// Old style BRDF
struct OldStyleBRDF {
SampledSpectrum diffuseColor;
SampledSpectrum specularColor;
float glossiness;
};
RT_CALLABLE_PROGRAM uint32_t OldStyleSurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(OldStyleBRDF*)params;
auto &mat = *(const OldStyleSurfaceMaterial*)matDesc;
p.diffuseColor = calcNode(mat.nodeDiffuseColor, mat.immDiffuseColor, surfPt, wls);
p.specularColor = calcNode(mat.nodeSpecularColor, mat.immSpecularColor, surfPt, wls);
p.glossiness = std::fmin(0.99f, calcNode(mat.nodeGlossiness, mat.immGlossiness, surfPt, wls));
return sizeof(OldStyleBRDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum OldStyleBRDF_getBaseColor(const uint32_t* params) {
auto &p = *(const OldStyleBRDF*)params;
return p.diffuseColor + p.specularColor;
}
RT_CALLABLE_PROGRAM bool OldStyleBRDF_matches(const uint32_t* params, DirectionType flags) {
DirectionType m_type = DirectionType::Reflection() | DirectionType::LowFreq() | DirectionType::HighFreq();
return m_type.matches(flags);
}
RT_CALLABLE_PROGRAM SampledSpectrum OldStyleBRDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const OldStyleBRDF*)params;
return diffuseAndSpecularBRDF_sampleInternal(p.diffuseColor, p.specularColor, 1 - p.glossiness,
query, uComponent, uDir, result);
}
RT_CALLABLE_PROGRAM SampledSpectrum OldStyleBRDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const OldStyleBRDF*)params;
return diffuseAndSpecularBRDF_evaluateInternal(p.diffuseColor, p.specularColor, 1 - p.glossiness,
query, dirLocal);
}
RT_CALLABLE_PROGRAM float OldStyleBRDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const OldStyleBRDF*)params;
return diffuseAndSpecularBRDF_evaluatePDFInternal(p.diffuseColor, p.specularColor, 1 - p.glossiness,
query, dirLocal);
}
RT_CALLABLE_PROGRAM float OldStyleBRDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const OldStyleBRDF*)params;
return diffuseAndSpecularBRDF_weightInternal(p.diffuseColor, p.specularColor, 1 - p.glossiness,
query);
}
// END: Old style BRDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// NullEDF
RT_CALLABLE_PROGRAM uint32_t NullEDF_setupEDF(const uint32_t* matDesc, const SurfacePoint &surfPt, uint32_t* params) {
return 0;
}
RT_CALLABLE_PROGRAM SampledSpectrum NullEDF_evaluateEmittanceInternal(const uint32_t* params) {
return SampledSpectrum::Zero();
}
RT_CALLABLE_PROGRAM SampledSpectrum NullEDF_evaluateInternal(const uint32_t* params, const EDFQuery &query, const Vector3D &dirLocal) {
return SampledSpectrum::Zero();
}
// END: NullEDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// DiffuseEDF
struct DiffuseEDF {
SampledSpectrum emittance;
};
RT_CALLABLE_PROGRAM uint32_t DiffuseEmitterSurfaceMaterial_setupEDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(DiffuseEDF*)params;
auto &mat = *(const DiffuseEmitterSurfaceMaterial*)matDesc;
p.emittance = calcNode(mat.nodeEmittance, mat.immEmittance, surfPt, wls);
return sizeof(DiffuseEDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum DiffuseEDF_evaluateEmittanceInternal(const uint32_t* params) {
auto &p = *(const DiffuseEDF*)params;
return p.emittance;
}
RT_CALLABLE_PROGRAM SampledSpectrum DiffuseEDF_evaluateInternal(const uint32_t* params, const EDFQuery &query, const Vector3D &dirLocal) {
return SampledSpectrum(dirLocal.z > 0.0f ? 1.0f / M_PIf : 0.0f);
}
// END: DiffuseEDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// MultiBSDF / MultiEDF
// bsdf0-3: param offsets
// numBSDFs
// --------------------------------
// BSDF0 procedure set index
// BSDF0 params
// ...
// BSDF3 procedure set index
// BSDF3 params
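// Illustrative layout for two sub-BSDFs (word indices; N0/N1 are the sub-BSDF parameter sizes):
// [0] MultiBSDF header (the bit-fields below)
// [1] BSDF0 procedure set index, [2 .. 1+N0] BSDF0 params (so bsdf0 = 1)
// [2+N0] BSDF1 procedure set index, [3+N0 ..] BSDF1 params (so bsdf1 = 2 + N0)
// The 6-bit offset fields limit each offset to fewer than 64 words.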
struct MultiBSDF {
struct {
unsigned int bsdf0 : 6;
unsigned int bsdf1 : 6;
unsigned int bsdf2 : 6;
unsigned int bsdf3 : 6;
unsigned int numBSDFs : 8;
};
};
RT_CALLABLE_PROGRAM uint32_t MultiSurfaceMaterial_setupBSDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(MultiBSDF*)params;
auto &mat = *(const MultiSurfaceMaterial*)matDesc;
uint32_t baseIndex = sizeof(MultiBSDF) / 4;
uint32_t bsdfOffsets[4] = { 0, 0, 0, 0 };
for (int i = 0; i < mat.numSubMaterials; ++i) {
bsdfOffsets[i] = baseIndex;
const SurfaceMaterialDescriptor subMatDesc = pv_materialDescriptorBuffer[mat.subMatIndices[i]];
ProgSigSetupBSDF setupBSDF = (ProgSigSetupBSDF)subMatDesc.progSetupBSDF;
*(params + baseIndex++) = subMatDesc.bsdfProcedureSetIndex;
baseIndex += setupBSDF(subMatDesc.data, surfPt, wls, params + baseIndex);
}
p.bsdf0 = bsdfOffsets[0];
p.bsdf1 = bsdfOffsets[1];
p.bsdf2 = bsdfOffsets[2];
p.bsdf3 = bsdfOffsets[3];
p.numBSDFs = mat.numSubMaterials;
//vlrDevPrintf("%u, %u, %u, %u, %u mats\n", p.bsdf0, p.bsdf1, p.bsdf2, p.bsdf3, p.numBSDFs);
return baseIndex;
}
RT_CALLABLE_PROGRAM SampledSpectrum MultiBSDF_getBaseColor(const uint32_t* params) {
auto &p = *(const MultiBSDF*)params;
uint32_t bsdfOffsets[4] = { p.bsdf0, p.bsdf1, p.bsdf2, p.bsdf3 };
SampledSpectrum ret;
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFGetBaseColor getBaseColor = (ProgSigBSDFGetBaseColor)procSet.progGetBaseColor;
ret += getBaseColor(bsdf + 1);
}
return ret;
}
RT_CALLABLE_PROGRAM bool MultiBSDF_matches(const uint32_t* params, DirectionType flags) {
auto &p = *(const MultiBSDF*)params;
uint32_t bsdfOffsets[4] = { p.bsdf0, p.bsdf1, p.bsdf2, p.bsdf3 };
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFmatches matches = (ProgSigBSDFmatches)procSet.progMatches;
if (matches(bsdf + 1, flags))
return true;
}
return false;
}
RT_CALLABLE_PROGRAM SampledSpectrum MultiBSDF_sampleInternal(const uint32_t* params, const BSDFQuery &query, float uComponent, const float uDir[2], BSDFQueryResult* result) {
auto &p = *(const MultiBSDF*)params;
uint32_t bsdfOffsets[4] = { p.bsdf0, p.bsdf1, p.bsdf2, p.bsdf3 };
float weights[4];
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFWeightInternal weightInternal = (ProgSigBSDFWeightInternal)procSet.progWeightInternal;
weights[i] = weightInternal(bsdf + 1, query);
}
// JP: 各BSDFのウェイトに基づいて方向のサンプルを行うBSDFを選択する。
// EN: Based on the weight of each BSDF, select a BSDF from which to sample a direction.
float tempProb;
float sumWeights;
uint32_t idx = sampleDiscrete(weights, p.numBSDFs, uComponent, &tempProb, &sumWeights, &uComponent);
if (sumWeights == 0.0f) {
result->dirPDF = 0.0f;
return SampledSpectrum::Zero();
}
const uint32_t* selectedBSDF = params + bsdfOffsets[idx];
uint32_t selProcIdx = *(const uint32_t*)selectedBSDF;
const BSDFProcedureSet selProcSet = pv_bsdfProcedureSetBuffer[selProcIdx];
ProgSigBSDFSampleInternal sampleInternal = (ProgSigBSDFSampleInternal)selProcSet.progSampleInternal;
// JP: 選択したBSDFから方向をサンプリングする。
// EN: sample a direction from the selected BSDF.
SampledSpectrum value = sampleInternal(selectedBSDF + 1, query, uComponent, uDir, result);
result->dirPDF *= weights[idx];
if (result->dirPDF == 0.0f) {
return SampledSpectrum::Zero();
}
// JP: サンプルした方向に関するBSDFの値の合計と、single-sample model MISに基づいた確率密度を計算する。
// EN: calculate the total of BSDF values and a PDF based on the single-sample model MIS for the sampled direction.
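// The selected component's PDF was already scaled by its weight above; the loop below adds the
// weighted PDFs of the remaining matching components, so after the final division by sumWeights
// the result is pdf = sum_i(w_i * p_i) / sum_i(w_i) (single-sample MIS).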
if (!result->sampledType.isDelta()) {
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFmatches matches = (ProgSigBSDFmatches)procSet.progMatches;
ProgSigBSDFEvaluatePDFInternal evaluatePDFInternal = (ProgSigBSDFEvaluatePDFInternal)procSet.progEvaluatePDFInternal;
if (i != idx && matches(bsdf + 1, query.dirTypeFilter))
result->dirPDF += evaluatePDFInternal(bsdf + 1, query, result->dirLocal) * weights[i];
}
BSDFQuery mQuery = query;
mQuery.dirTypeFilter &= sideTest(query.geometricNormalLocal, query.dirLocal, result->dirLocal);
value = SampledSpectrum::Zero();
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFmatches matches = (ProgSigBSDFmatches)procSet.progMatches;
ProgSigBSDFEvaluateInternal evaluateInternal = (ProgSigBSDFEvaluateInternal)procSet.progEvaluateInternal;
if (!matches(bsdf + 1, mQuery.dirTypeFilter))
continue;
value += evaluateInternal(bsdf + 1, mQuery, result->dirLocal);
}
}
result->dirPDF /= sumWeights;
return value;
}
RT_CALLABLE_PROGRAM SampledSpectrum MultiBSDF_evaluateInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const MultiBSDF*)params;
uint32_t bsdfOffsets[4] = { p.bsdf0, p.bsdf1, p.bsdf2, p.bsdf3 };
SampledSpectrum retValue = SampledSpectrum::Zero();
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFmatches matches = (ProgSigBSDFmatches)procSet.progMatches;
ProgSigBSDFEvaluateInternal evaluateInternal = (ProgSigBSDFEvaluateInternal)procSet.progEvaluateInternal;
if (!matches(bsdf + 1, query.dirTypeFilter))
continue;
retValue += evaluateInternal(bsdf + 1, query, dirLocal);
}
return retValue;
}
RT_CALLABLE_PROGRAM float MultiBSDF_evaluatePDFInternal(const uint32_t* params, const BSDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const MultiBSDF*)params;
uint32_t bsdfOffsets[4] = { p.bsdf0, p.bsdf1, p.bsdf2, p.bsdf3 };
float sumWeights = 0.0f;
float weights[4];
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFWeightInternal weightInternal = (ProgSigBSDFWeightInternal)procSet.progWeightInternal;
weights[i] = weightInternal(bsdf + 1, query);
sumWeights += weights[i];
}
if (sumWeights == 0.0f)
return 0.0f;
float retPDF = 0.0f;
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFEvaluatePDFInternal evaluatePDFInternal = (ProgSigBSDFEvaluatePDFInternal)procSet.progEvaluatePDFInternal;
if (weights[i] > 0)
retPDF += evaluatePDFInternal(bsdf + 1, query, dirLocal) * weights[i];
}
retPDF /= sumWeights;
return retPDF;
}
RT_CALLABLE_PROGRAM float MultiBSDF_weightInternal(const uint32_t* params, const BSDFQuery &query) {
auto &p = *(const MultiBSDF*)params;
uint32_t bsdfOffsets[4] = { p.bsdf0, p.bsdf1, p.bsdf2, p.bsdf3 };
float ret = 0.0f;
for (int i = 0; i < p.numBSDFs; ++i) {
const uint32_t* bsdf = params + bsdfOffsets[i];
uint32_t procIdx = *(const uint32_t*)bsdf;
const BSDFProcedureSet procSet = pv_bsdfProcedureSetBuffer[procIdx];
ProgSigBSDFWeightInternal weightInternal = (ProgSigBSDFWeightInternal)procSet.progWeightInternal;
ret += weightInternal(bsdf + 1, query);
}
return ret;
}
// edf0-3: param offsets
// numEDFs
// --------------------------------
// EDF0 procedure set index
// EDF0 params
// ...
// EDF3 procedure set index
// EDF3 params
struct MultiEDF {
struct {
unsigned int edf0 : 6;
unsigned int edf1 : 6;
unsigned int edf2 : 6;
unsigned int edf3 : 6;
unsigned int numEDFs : 8;
};
};
RT_CALLABLE_PROGRAM uint32_t MultiSurfaceMaterial_setupEDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(MultiEDF*)params;
auto &mat = *(const MultiSurfaceMaterial*)matDesc;
uint32_t baseIndex = sizeof(MultiEDF) / 4;
uint32_t edfOffsets[4] = { 0, 0, 0, 0 };
for (int i = 0; i < mat.numSubMaterials; ++i) {
edfOffsets[i] = baseIndex;
const SurfaceMaterialDescriptor subMatDesc = pv_materialDescriptorBuffer[mat.subMatIndices[i]];
ProgSigSetupEDF setupEDF = (ProgSigSetupEDF)subMatDesc.progSetupEDF;
*(params + baseIndex++) = subMatDesc.edfProcedureSetIndex;
baseIndex += setupEDF(subMatDesc.data, surfPt, wls, params + baseIndex);
}
p.edf0 = edfOffsets[0];
p.edf1 = edfOffsets[1];
p.edf2 = edfOffsets[2];
p.edf3 = edfOffsets[3];
p.numEDFs = mat.numSubMaterials;
return baseIndex;
}
RT_CALLABLE_PROGRAM SampledSpectrum MultiEDF_evaluateEmittanceInternal(const uint32_t* params) {
auto &p = *(const MultiEDF*)params;
uint32_t edfOffsets[4] = { p.edf0, p.edf1, p.edf2, p.edf3 };
SampledSpectrum ret = SampledSpectrum::Zero();
for (int i = 0; i < p.numEDFs; ++i) {
const uint32_t* edf = params + edfOffsets[i];
uint32_t procIdx = *(const uint32_t*)edf;
const EDFProcedureSet procSet = pv_edfProcedureSetBuffer[procIdx];
ProgSigEDFEvaluateEmittanceInternal evaluateEmittanceInternal = (ProgSigEDFEvaluateEmittanceInternal)procSet.progEvaluateEmittanceInternal;
ret += evaluateEmittanceInternal(edf + 1);
}
return ret;
}
RT_CALLABLE_PROGRAM SampledSpectrum MultiEDF_evaluateInternal(const uint32_t* params, const EDFQuery &query, const Vector3D &dirLocal) {
auto &p = *(const MultiEDF*)params;
uint32_t edfOffsets[4] = { p.edf0, p.edf1, p.edf2, p.edf3 };
SampledSpectrum ret = SampledSpectrum::Zero();
SampledSpectrum sumEmittance = SampledSpectrum::Zero();
for (int i = 0; i < p.numEDFs; ++i) {
const uint32_t* edf = params + edfOffsets[i];
uint32_t procIdx = *(const uint32_t*)edf;
const EDFProcedureSet procSet = pv_edfProcedureSetBuffer[procIdx];
ProgSigEDFEvaluateEmittanceInternal evaluateEmittanceInternal = (ProgSigEDFEvaluateEmittanceInternal)procSet.progEvaluateEmittanceInternal;
ProgSigEDFEvaluateInternal evaluateInternal = (ProgSigEDFEvaluateInternal)procSet.progEvaluateInternal;
SampledSpectrum emittance = evaluateEmittanceInternal(edf + 1);
sumEmittance += emittance;
ret += emittance * evaluateInternal(edf + 1, query, dirLocal);
}
ret.safeDivide(sumEmittance);
return ret;
}
// END: MultiBSDF / MultiEDF
// ----------------------------------------------------------------
// ----------------------------------------------------------------
// EnvironmentEDF
struct EnvironmentEDF {
SampledSpectrum emittance;
};
RT_CALLABLE_PROGRAM uint32_t EnvironmentEmitterSurfaceMaterial_setupEDF(const uint32_t* matDesc, const SurfacePoint &surfPt, const WavelengthSamples &wls, uint32_t* params) {
auto &p = *(EnvironmentEDF*)params;
auto &mat = *(const EnvironmentEmitterSurfaceMaterial*)matDesc;
p.emittance = calcNode(mat.nodeEmittance, mat.immEmittance, surfPt, wls) * mat.immScale;
return sizeof(EnvironmentEDF) / 4;
}
RT_CALLABLE_PROGRAM SampledSpectrum EnvironmentEDF_evaluateEmittanceInternal(const uint32_t* params) {
auto &p = *(const EnvironmentEDF*)params;
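// The stored value appears to be radiance from the environment map; multiplying by pi converts it
// to emittance so that emittance * (1/pi) from evaluateInternal reproduces that radiance.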
return M_PIf * p.emittance;
}
RT_CALLABLE_PROGRAM SampledSpectrum EnvironmentEDF_evaluateInternal(const uint32_t* params, const EDFQuery &query, const Vector3D &dirLocal) {
return SampledSpectrum(dirLocal.z > 0.0f ? 1.0f / M_PIf : 0.0f);
}
// END: EnvironmentEDF
// ----------------------------------------------------------------
}
|
0d76651c32944cd0d06fd99fb0ca6c0128faaebe.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
extern "C" __global__ void default_function_kernel0(void* __restrict__ A, void* __restrict__ B, void* __restrict__ compute) {
float compute_local[1];
__shared__ float A_shared[32];
__shared__ float B_shared[1024];
float A_shared_local[1];
float B_shared_local[1];
compute_local[(0)] = 0.000000e+00f;
for (int k_outer = 0; k_outer < 16; ++k_outer) {
__syncthreads();
A_shared[(((int)threadIdx.x))] = ((float*)A)[(((k_outer * 32) + ((int)threadIdx.x)))];
for (int ax1_inner = 0; ax1_inner < 32; ++ax1_inner) {
B_shared[(((ax1_inner * 32) + ((int)threadIdx.x)))] = ((float*)B)[(((((((int)blockIdx.x) * 16384) + (ax1_inner * 512)) + (k_outer * 32)) + ((int)threadIdx.x)))];
}
__syncthreads();
for (int k_inner = 0; k_inner < 32; ++k_inner) {
A_shared_local[(0)] = A_shared[(k_inner)];
B_shared_local[(0)] = B_shared[(((((int)threadIdx.x) * 32) + k_inner))];
compute_local[(0)] = (compute_local[(0)] + (A_shared_local[(0)] * B_shared_local[(0)]));
}
}
((float*)compute)[(((((int)blockIdx.x) * 32) + ((int)threadIdx.x)))] = compute_local[(0)];
}
| 0d76651c32944cd0d06fd99fb0ca6c0128faaebe.cu | extern "C" __global__ void default_function_kernel0(void* __restrict__ A, void* __restrict__ B, void* __restrict__ compute) {
float compute_local[1];
__shared__ float A_shared[32];
__shared__ float B_shared[1024];
float A_shared_local[1];
float B_shared_local[1];
compute_local[(0)] = 0.000000e+00f;
for (int k_outer = 0; k_outer < 16; ++k_outer) {
__syncthreads();
A_shared[(((int)threadIdx.x))] = ((float*)A)[(((k_outer * 32) + ((int)threadIdx.x)))];
for (int ax1_inner = 0; ax1_inner < 32; ++ax1_inner) {
B_shared[(((ax1_inner * 32) + ((int)threadIdx.x)))] = ((float*)B)[(((((((int)blockIdx.x) * 16384) + (ax1_inner * 512)) + (k_outer * 32)) + ((int)threadIdx.x)))];
}
__syncthreads();
for (int k_inner = 0; k_inner < 32; ++k_inner) {
A_shared_local[(0)] = A_shared[(k_inner)];
B_shared_local[(0)] = B_shared[(((((int)threadIdx.x) * 32) + k_inner))];
compute_local[(0)] = (compute_local[(0)] + (A_shared_local[(0)] * B_shared_local[(0)]));
}
}
((float*)compute)[(((((int)blockIdx.x) * 32) + ((int)threadIdx.x)))] = compute_local[(0)];
}
|
55df3e7d4a83a2d6506ec03ec2b5a19755407641.hip | // !!! This is a file automatically generated by hipify!!!
/*!
* Copyright 2019-2020 by Contributors
* \file survival_metric.cu
* \brief Metrics for survival analysis
* \author Avinash Barnwal, Hyunsu Cho and Toby Hocking
*/
#include <rabit/rabit.h>
#include <dmlc/registry.h>
#include <memory>
#include <vector>
#include "xgboost/json.h"
#include "xgboost/metric.h"
#include "xgboost/host_device_vector.h"
#include "metric_common.h"
#include "../common/math.h"
#include "../common/survival_util.h"
#include "../common/threading_utils.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::hip::par
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
using AFTParam = xgboost::common::AFTParam;
using ProbabilityDistributionType = xgboost::common::ProbabilityDistributionType;
template <typename Distribution>
using AFTLoss = xgboost::common::AFTLoss<Distribution>;
namespace xgboost {
namespace metric {
// tag this file, used to force static linking later.
DMLC_REGISTRY_FILE_TAG(survival_metric);
template <typename EvalRow>
class ElementWiseSurvivalMetricsReduction {
public:
ElementWiseSurvivalMetricsReduction() = default;
void Configure(EvalRow policy) {
policy_ = policy;
}
PackedReduceResult
CpuReduceMetrics(const HostDeviceVector<bst_float> &weights,
const HostDeviceVector<bst_float> &labels_lower_bound,
const HostDeviceVector<bst_float> &labels_upper_bound,
const HostDeviceVector<bst_float> &preds,
int32_t n_threads) const {
size_t ndata = labels_lower_bound.Size();
CHECK_EQ(ndata, labels_upper_bound.Size());
const auto& h_labels_lower_bound = labels_lower_bound.HostVector();
const auto& h_labels_upper_bound = labels_upper_bound.HostVector();
const auto& h_weights = weights.HostVector();
const auto& h_preds = preds.HostVector();
std::vector<double> score_tloc(n_threads, 0.0);
std::vector<double> weight_tloc(n_threads, 0.0);
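// Per-thread partial sums (indexed by the OpenMP thread id) avoid atomics inside the parallel
// loop; they are combined with std::accumulate once the loop finishes.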
common::ParallelFor(ndata, n_threads, [&](size_t i) {
const double wt =
h_weights.empty() ? 1.0 : static_cast<double>(h_weights[i]);
auto t_idx = omp_get_thread_num();
score_tloc[t_idx] +=
policy_.EvalRow(static_cast<double>(h_labels_lower_bound[i]),
static_cast<double>(h_labels_upper_bound[i]),
static_cast<double>(h_preds[i])) *
wt;
weight_tloc[t_idx] += wt;
});
double residue_sum = std::accumulate(score_tloc.cbegin(), score_tloc.cend(), 0.0);
double weights_sum = std::accumulate(weight_tloc.cbegin(), weight_tloc.cend(), 0.0);
PackedReduceResult res{residue_sum, weights_sum};
return res;
}
#if defined(XGBOOST_USE_CUDA)
PackedReduceResult DeviceReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels_lower_bound,
const HostDeviceVector<bst_float>& labels_upper_bound,
const HostDeviceVector<bst_float>& preds) {
size_t ndata = labels_lower_bound.Size();
CHECK_EQ(ndata, labels_upper_bound.Size());
thrust::counting_iterator<size_t> begin(0);
thrust::counting_iterator<size_t> end = begin + ndata;
auto s_label_lower_bound = labels_lower_bound.DeviceSpan();
auto s_label_upper_bound = labels_upper_bound.DeviceSpan();
auto s_preds = preds.DeviceSpan();
auto s_weights = weights.DeviceSpan();
const bool is_null_weight = (weights.Size() == 0);
auto d_policy = policy_;
dh::XGBCachingDeviceAllocator<char> alloc;
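// Single-pass reduction on the device: each element index maps to a PackedReduceResult holding
// (weighted residue, weight), and thrust::plus combines them. The caching allocator presumably
// lets Thrust reuse temporary device memory across calls instead of reallocating each time.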
PackedReduceResult result = thrust::transform_reduce(
thrust::hip::par(alloc),
begin, end,
[=] XGBOOST_DEVICE(size_t idx) {
double weight = is_null_weight ? 1.0 : static_cast<double>(s_weights[idx]);
double residue = d_policy.EvalRow(
static_cast<double>(s_label_lower_bound[idx]),
static_cast<double>(s_label_upper_bound[idx]),
static_cast<double>(s_preds[idx]));
residue *= weight;
return PackedReduceResult{residue, weight};
},
PackedReduceResult(),
thrust::plus<PackedReduceResult>());
return result;
}
#endif // XGBOOST_USE_CUDA
PackedReduceResult Reduce(
const GenericParameter &ctx,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels_lower_bound,
const HostDeviceVector<bst_float>& labels_upper_bound,
const HostDeviceVector<bst_float>& preds) {
PackedReduceResult result;
if (ctx.gpu_id < 0) {
result = CpuReduceMetrics(weights, labels_lower_bound, labels_upper_bound,
preds, ctx.Threads());
}
#if defined(XGBOOST_USE_CUDA)
else { // NOLINT
preds.SetDevice(ctx.gpu_id);
labels_lower_bound.SetDevice(ctx.gpu_id);
labels_upper_bound.SetDevice(ctx.gpu_id);
weights.SetDevice(ctx.gpu_id);
dh::safe_cuda(hipSetDevice(ctx.gpu_id));
result = DeviceReduceMetrics(weights, labels_lower_bound, labels_upper_bound, preds);
}
#endif // defined(XGBOOST_USE_CUDA)
return result;
}
private:
EvalRow policy_;
};
struct EvalIntervalRegressionAccuracy {
void Configure(const Args& args) {}
const char* Name() const {
return "interval-regression-accuracy";
}
XGBOOST_DEVICE double EvalRow(
double label_lower_bound, double label_upper_bound, double log_pred) const {
const double pred = exp(log_pred);
return (pred >= label_lower_bound && pred <= label_upper_bound) ? 1.0 : 0.0;
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
/*! \brief Negative log likelihood of Accelerated Failure Time model */
template <typename Distribution>
struct EvalAFTNLogLik {
void Configure(const Args& args) {
param_.UpdateAllowUnknown(args);
}
const char* Name() const {
return "aft-nloglik";
}
XGBOOST_DEVICE double EvalRow(
double label_lower_bound, double label_upper_bound, double pred) const {
return AFTLoss<Distribution>::Loss(
label_lower_bound, label_upper_bound, pred, param_.aft_loss_distribution_scale);
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
private:
AFTParam param_;
};
template <typename Policy> struct EvalEWiseSurvivalBase : public Metric {
explicit EvalEWiseSurvivalBase(GenericParameter const *ctx) {
tparam_ = ctx;
}
EvalEWiseSurvivalBase() = default;
void Configure(const Args& args) override {
policy_.Configure(args);
reducer_.Configure(policy_);
CHECK(tparam_);
}
bst_float Eval(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
bool distributed) override {
CHECK_EQ(preds.Size(), info.labels_lower_bound_.Size());
CHECK_EQ(preds.Size(), info.labels_upper_bound_.Size());
CHECK(tparam_);
auto result =
reducer_.Reduce(*tparam_, info.weights_, info.labels_lower_bound_,
info.labels_upper_bound_, preds);
double dat[2] {result.Residue(), result.Weights()};
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return static_cast<bst_float>(Policy::GetFinal(dat[0], dat[1]));
}
const char* Name() const override {
return policy_.Name();
}
private:
Policy policy_;
ElementWiseSurvivalMetricsReduction<Policy> reducer_;
int device_{-1}; // used only for GPU metric
};
// This class exists because we want to perform dispatch according to the distribution type at
// configuration time, not at prediction time.
struct AFTNLogLikDispatcher : public Metric {
const char* Name() const override {
return "aft-nloglik";
}
bst_float Eval(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
bool distributed) override {
CHECK(metric_) << "AFT metric must be configured first, with distribution type and scale";
return metric_->Eval(preds, info, distributed);
}
void Configure(const Args& args) override {
param_.UpdateAllowUnknown(args);
switch (param_.aft_loss_distribution) {
case common::ProbabilityDistributionType::kNormal:
metric_.reset(
new EvalEWiseSurvivalBase<EvalAFTNLogLik<common::NormalDistribution>>(
tparam_));
break;
case common::ProbabilityDistributionType::kLogistic:
metric_.reset(new EvalEWiseSurvivalBase<
EvalAFTNLogLik<common::LogisticDistribution>>(tparam_));
break;
case common::ProbabilityDistributionType::kExtreme:
metric_.reset(new EvalEWiseSurvivalBase<
EvalAFTNLogLik<common::ExtremeDistribution>>(tparam_));
break;
default:
LOG(FATAL) << "Unknown probability distribution";
}
metric_->Configure(args);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(this->Name());
out["aft_loss_param"] = ToJson(param_);
}
void LoadConfig(const Json& in) override {
FromJson(in["aft_loss_param"], ¶m_);
}
private:
AFTParam param_;
std::unique_ptr<Metric> metric_;
};
XGBOOST_REGISTER_METRIC(AFTNLogLik, "aft-nloglik")
.describe("Negative log likelihood of Accelerated Failure Time model.")
.set_body([](const char* param) {
return new AFTNLogLikDispatcher();
});
XGBOOST_REGISTER_METRIC(IntervalRegressionAccuracy, "interval-regression-accuracy")
.describe("")
.set_body([](const char* param) {
return new EvalEWiseSurvivalBase<EvalIntervalRegressionAccuracy>();
});
} // namespace metric
} // namespace xgboost
| 55df3e7d4a83a2d6506ec03ec2b5a19755407641.cu | /*!
* Copyright 2019-2020 by Contributors
* \file survival_metric.cu
* \brief Metrics for survival analysis
* \author Avinash Barnwal, Hyunsu Cho and Toby Hocking
*/
#include <rabit/rabit.h>
#include <dmlc/registry.h>
#include <memory>
#include <vector>
#include "xgboost/json.h"
#include "xgboost/metric.h"
#include "xgboost/host_device_vector.h"
#include "metric_common.h"
#include "../common/math.h"
#include "../common/survival_util.h"
#include "../common/threading_utils.h"
#if defined(XGBOOST_USE_CUDA)
#include <thrust/execution_policy.h> // thrust::cuda::par
#include "../common/device_helpers.cuh"
#endif // XGBOOST_USE_CUDA
using AFTParam = xgboost::common::AFTParam;
using ProbabilityDistributionType = xgboost::common::ProbabilityDistributionType;
template <typename Distribution>
using AFTLoss = xgboost::common::AFTLoss<Distribution>;
namespace xgboost {
namespace metric {
// tag this file, used to force static link later.
DMLC_REGISTRY_FILE_TAG(survival_metric);
template <typename EvalRow>
class ElementWiseSurvivalMetricsReduction {
public:
ElementWiseSurvivalMetricsReduction() = default;
void Configure(EvalRow policy) {
policy_ = policy;
}
PackedReduceResult
CpuReduceMetrics(const HostDeviceVector<bst_float> &weights,
const HostDeviceVector<bst_float> &labels_lower_bound,
const HostDeviceVector<bst_float> &labels_upper_bound,
const HostDeviceVector<bst_float> &preds,
int32_t n_threads) const {
size_t ndata = labels_lower_bound.Size();
CHECK_EQ(ndata, labels_upper_bound.Size());
const auto& h_labels_lower_bound = labels_lower_bound.HostVector();
const auto& h_labels_upper_bound = labels_upper_bound.HostVector();
const auto& h_weights = weights.HostVector();
const auto& h_preds = preds.HostVector();
std::vector<double> score_tloc(n_threads, 0.0);
std::vector<double> weight_tloc(n_threads, 0.0);
common::ParallelFor(ndata, n_threads, [&](size_t i) {
const double wt =
h_weights.empty() ? 1.0 : static_cast<double>(h_weights[i]);
auto t_idx = omp_get_thread_num();
score_tloc[t_idx] +=
policy_.EvalRow(static_cast<double>(h_labels_lower_bound[i]),
static_cast<double>(h_labels_upper_bound[i]),
static_cast<double>(h_preds[i])) *
wt;
weight_tloc[t_idx] += wt;
});
double residue_sum = std::accumulate(score_tloc.cbegin(), score_tloc.cend(), 0.0);
double weights_sum = std::accumulate(weight_tloc.cbegin(), weight_tloc.cend(), 0.0);
PackedReduceResult res{residue_sum, weights_sum};
return res;
}
#if defined(XGBOOST_USE_CUDA)
PackedReduceResult DeviceReduceMetrics(
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels_lower_bound,
const HostDeviceVector<bst_float>& labels_upper_bound,
const HostDeviceVector<bst_float>& preds) {
size_t ndata = labels_lower_bound.Size();
CHECK_EQ(ndata, labels_upper_bound.Size());
thrust::counting_iterator<size_t> begin(0);
thrust::counting_iterator<size_t> end = begin + ndata;
auto s_label_lower_bound = labels_lower_bound.DeviceSpan();
auto s_label_upper_bound = labels_upper_bound.DeviceSpan();
auto s_preds = preds.DeviceSpan();
auto s_weights = weights.DeviceSpan();
const bool is_null_weight = (weights.Size() == 0);
auto d_policy = policy_;
dh::XGBCachingDeviceAllocator<char> alloc;
PackedReduceResult result = thrust::transform_reduce(
thrust::cuda::par(alloc),
begin, end,
[=] XGBOOST_DEVICE(size_t idx) {
double weight = is_null_weight ? 1.0 : static_cast<double>(s_weights[idx]);
double residue = d_policy.EvalRow(
static_cast<double>(s_label_lower_bound[idx]),
static_cast<double>(s_label_upper_bound[idx]),
static_cast<double>(s_preds[idx]));
residue *= weight;
return PackedReduceResult{residue, weight};
},
PackedReduceResult(),
thrust::plus<PackedReduceResult>());
return result;
}
#endif // XGBOOST_USE_CUDA
PackedReduceResult Reduce(
const GenericParameter &ctx,
const HostDeviceVector<bst_float>& weights,
const HostDeviceVector<bst_float>& labels_lower_bound,
const HostDeviceVector<bst_float>& labels_upper_bound,
const HostDeviceVector<bst_float>& preds) {
PackedReduceResult result;
if (ctx.gpu_id < 0) {
result = CpuReduceMetrics(weights, labels_lower_bound, labels_upper_bound,
preds, ctx.Threads());
}
#if defined(XGBOOST_USE_CUDA)
else { // NOLINT
preds.SetDevice(ctx.gpu_id);
labels_lower_bound.SetDevice(ctx.gpu_id);
labels_upper_bound.SetDevice(ctx.gpu_id);
weights.SetDevice(ctx.gpu_id);
dh::safe_cuda(cudaSetDevice(ctx.gpu_id));
result = DeviceReduceMetrics(weights, labels_lower_bound, labels_upper_bound, preds);
}
#endif // defined(XGBOOST_USE_CUDA)
return result;
}
private:
EvalRow policy_;
};
struct EvalIntervalRegressionAccuracy {
void Configure(const Args& args) {}
const char* Name() const {
return "interval-regression-accuracy";
}
XGBOOST_DEVICE double EvalRow(
double label_lower_bound, double label_upper_bound, double log_pred) const {
const double pred = exp(log_pred);
return (pred >= label_lower_bound && pred <= label_upper_bound) ? 1.0 : 0.0;
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
};
/*! \brief Negative log likelihood of Accelerated Failure Time model */
template <typename Distribution>
struct EvalAFTNLogLik {
void Configure(const Args& args) {
param_.UpdateAllowUnknown(args);
}
const char* Name() const {
return "aft-nloglik";
}
XGBOOST_DEVICE double EvalRow(
double label_lower_bound, double label_upper_bound, double pred) const {
return AFTLoss<Distribution>::Loss(
label_lower_bound, label_upper_bound, pred, param_.aft_loss_distribution_scale);
}
static double GetFinal(double esum, double wsum) {
return wsum == 0 ? esum : esum / wsum;
}
private:
AFTParam param_;
};
template <typename Policy> struct EvalEWiseSurvivalBase : public Metric {
explicit EvalEWiseSurvivalBase(GenericParameter const *ctx) {
tparam_ = ctx;
}
EvalEWiseSurvivalBase() = default;
void Configure(const Args& args) override {
policy_.Configure(args);
reducer_.Configure(policy_);
CHECK(tparam_);
}
bst_float Eval(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
bool distributed) override {
CHECK_EQ(preds.Size(), info.labels_lower_bound_.Size());
CHECK_EQ(preds.Size(), info.labels_upper_bound_.Size());
CHECK(tparam_);
auto result =
reducer_.Reduce(*tparam_, info.weights_, info.labels_lower_bound_,
info.labels_upper_bound_, preds);
double dat[2] {result.Residue(), result.Weights()};
if (distributed) {
rabit::Allreduce<rabit::op::Sum>(dat, 2);
}
return static_cast<bst_float>(Policy::GetFinal(dat[0], dat[1]));
}
const char* Name() const override {
return policy_.Name();
}
private:
Policy policy_;
ElementWiseSurvivalMetricsReduction<Policy> reducer_;
int device_{-1}; // used only for GPU metric
};
// This class exists because we want to perform dispatch according to the distribution type at
// configuration time, not at prediction time.
struct AFTNLogLikDispatcher : public Metric {
const char* Name() const override {
return "aft-nloglik";
}
bst_float Eval(const HostDeviceVector<bst_float>& preds,
const MetaInfo& info,
bool distributed) override {
CHECK(metric_) << "AFT metric must be configured first, with distribution type and scale";
return metric_->Eval(preds, info, distributed);
}
void Configure(const Args& args) override {
param_.UpdateAllowUnknown(args);
switch (param_.aft_loss_distribution) {
case common::ProbabilityDistributionType::kNormal:
metric_.reset(
new EvalEWiseSurvivalBase<EvalAFTNLogLik<common::NormalDistribution>>(
tparam_));
break;
case common::ProbabilityDistributionType::kLogistic:
metric_.reset(new EvalEWiseSurvivalBase<
EvalAFTNLogLik<common::LogisticDistribution>>(tparam_));
break;
case common::ProbabilityDistributionType::kExtreme:
metric_.reset(new EvalEWiseSurvivalBase<
EvalAFTNLogLik<common::ExtremeDistribution>>(tparam_));
break;
default:
LOG(FATAL) << "Unknown probability distribution";
}
metric_->Configure(args);
}
void SaveConfig(Json* p_out) const override {
auto& out = *p_out;
out["name"] = String(this->Name());
out["aft_loss_param"] = ToJson(param_);
}
void LoadConfig(const Json& in) override {
FromJson(in["aft_loss_param"], ¶m_);
}
private:
AFTParam param_;
std::unique_ptr<Metric> metric_;
};
XGBOOST_REGISTER_METRIC(AFTNLogLik, "aft-nloglik")
.describe("Negative log likelihood of Accelerated Failure Time model.")
.set_body([](const char* param) {
return new AFTNLogLikDispatcher();
});
XGBOOST_REGISTER_METRIC(IntervalRegressionAccuracy, "interval-regression-accuracy")
.describe("")
.set_body([](const char* param) {
return new EvalEWiseSurvivalBase<EvalIntervalRegressionAccuracy>();
});
} // namespace metric
} // namespace xgboost
|
601dbc8ec425b37ecb973a84281a902a7bebb96b.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::host_vector<int> host_in(idata, idata + n);
thrust::device_vector<int> dv_in = host_in;
thrust::device_vector<int> dv_out(n);
timer().startGpuTimer();
thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
timer().endGpuTimer();
thrust::copy(dv_out.begin(), dv_out.end(), odata);
}
}
}
| 601dbc8ec425b37ecb973a84281a902a7bebb96b.cu | #include <cuda.h>
#include <cuda_runtime.h>
#include <thrust/device_vector.h>
#include <thrust/host_vector.h>
#include <thrust/scan.h>
#include "common.h"
#include "thrust.h"
namespace StreamCompaction {
namespace Thrust {
using StreamCompaction::Common::PerformanceTimer;
PerformanceTimer& timer()
{
static PerformanceTimer timer;
return timer;
}
/**
* Performs prefix-sum (aka scan) on idata, storing the result into odata.
*/
void scan(int n, int *odata, const int *idata) {
// TODO use `thrust::exclusive_scan`
// example: for device_vectors dv_in and dv_out:
// thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
thrust::host_vector<int> host_in(idata, idata + n);
thrust::device_vector<int> dv_in = host_in;
thrust::device_vector<int> dv_out(n);
timer().startGpuTimer();
thrust::exclusive_scan(dv_in.begin(), dv_in.end(), dv_out.begin());
timer().endGpuTimer();
thrust::copy(dv_out.begin(), dv_out.end(), odata);
}
}
}
|
a58506c89a97dc796b12af83b84402d13310dc0b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdint.h>
#define CHECK(call) \
{ \
const hipError_t error = call; \
if (error != hipSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
hipGetErrorString(error)); \
exit(EXIT_FAILURE); \
} \
}
struct GpuTimer
{
hipEvent_t start;
hipEvent_t stop;
GpuTimer()
{
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GpuTimer()
{
hipEventDestroy(start);
hipEventDestroy(stop);
}
void Start()
{
hipEventRecord(start, 0);
}
void Stop()
{
hipEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
void readPnm(char * fileName, int &width, int &height, uchar3 * &pixels)
{
FILE * f = fopen(fileName, "r");
if (f == NULL)
{
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
char type[3];
fscanf(f, "%s", type);
if (strcmp(type, "P3") != 0) // In this exercise, we don't touch other types
{
fclose(f);
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
fscanf(f, "%i", &width);
fscanf(f, "%i", &height);
int max_val;
fscanf(f, "%i", &max_val);
if (max_val > 255) // In this exercise, we assume 1 byte per value
{
fclose(f);
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
pixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
for (int i = 0; i < width * height; i++)
fscanf(f, "%hhu%hhu%hhu", &pixels[i].x, &pixels[i].y, &pixels[i].z);
fclose(f);
}
void writePnm(uchar3 * pixels, int width, int height, char * fileName)
{
FILE * f = fopen(fileName, "w");
if (f == NULL)
{
printf("Cannot write %s\n", fileName);
exit(EXIT_FAILURE);
}
fprintf(f, "P3\n%i\n%i\n255\n", width, height);
for (int i = 0; i < width * height; i++)
fprintf(f, "%hhu\n%hhu\n%hhu\n", pixels[i].x, pixels[i].y, pixels[i].z);
fclose(f);
}
__global__ void blurImgKernel(uchar3 * inPixels, int width, int height,
float * filter, int filterWidth,
uchar3 * outPixels)
{
// TODO
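    // Illustrative completion of the TODO above (a minimal sketch, not part of
    // the original skeleton): one thread computes one output pixel, using the
    // same clamp-to-edge convolution as the host reference implementation below.
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    if (r < height && c < width)
    {
        float3 outPixel = make_float3(0, 0, 0);
        for (int filterR = 0; filterR < filterWidth; filterR++)
        {
            for (int filterC = 0; filterC < filterWidth; filterC++)
            {
                float filterVal = filter[filterR * filterWidth + filterC];
                int inR = min(height - 1, max(0, r - filterWidth / 2 + filterR));
                int inC = min(width - 1, max(0, c - filterWidth / 2 + filterC));
                uchar3 inPixel = inPixels[inR * width + inC];
                outPixel.x += filterVal * inPixel.x;
                outPixel.y += filterVal * inPixel.y;
                outPixel.z += filterVal * inPixel.z;
            }
        }
        outPixels[r * width + c] = make_uchar3(outPixel.x, outPixel.y, outPixel.z);
    }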
}
void blurImg(uchar3 * inPixels, int width, int height, float * filter, int filterWidth,
uchar3 * outPixels,
bool useDevice=false, dim3 blockSize=dim3(1, 1))
{
GpuTimer timer;
timer.Start();
if (useDevice == false)
{
for (int outPixelsR = 0; outPixelsR < height; outPixelsR++)
{
for (int outPixelsC = 0; outPixelsC < width; outPixelsC++)
{
float3 outPixel = make_float3(0, 0, 0);
for (int filterR = 0; filterR < filterWidth; filterR++)
{
for (int filterC = 0; filterC < filterWidth; filterC++)
{
float filterVal = filter[filterR * filterWidth + filterC];
int inPixelsR = (outPixelsR - filterWidth/2) + filterR;
int inPixelsC = (outPixelsC - filterWidth/2) + filterC;
inPixelsR = min(height - 1, max(0, inPixelsR));
inPixelsC = min(width - 1, max(0, inPixelsC));
uchar3 inPixel = inPixels[inPixelsR * width + inPixelsC];
outPixel.x += filterVal * inPixel.x;
outPixel.y += filterVal * inPixel.y;
outPixel.z += filterVal * inPixel.z;
}
}
outPixels[outPixelsR * width + outPixelsC] = make_uchar3(outPixel.x, outPixel.y, outPixel.z);
}
}
}
else // Use device
{
hipDeviceProp_t devProp;
hipGetDeviceProperties(&devProp, 0);
printf("GPU name: %s\n", devProp.name);
printf("GPU compute capability: %d.%d\n", devProp.major, devProp.minor);
// TODO
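        // Illustrative completion of the TODO above (a minimal sketch, not part
        // of the original skeleton): copy the image and filter to the device,
        // launch blurImgKernel over a 2D grid, and copy the blurred image back.
        uchar3 *d_inPixels, *d_outPixels;
        float *d_filter;
        size_t pixelsBytes = width * height * sizeof(uchar3);
        size_t filterBytes = filterWidth * filterWidth * sizeof(float);
        CHECK(hipMalloc((void **)&d_inPixels, pixelsBytes));
        CHECK(hipMalloc((void **)&d_outPixels, pixelsBytes));
        CHECK(hipMalloc((void **)&d_filter, filterBytes));
        CHECK(hipMemcpy(d_inPixels, inPixels, pixelsBytes, hipMemcpyHostToDevice));
        CHECK(hipMemcpy(d_filter, filter, filterBytes, hipMemcpyHostToDevice));
        dim3 gridSize((width - 1) / blockSize.x + 1, (height - 1) / blockSize.y + 1);
        hipLaunchKernelGGL(blurImgKernel, gridSize, blockSize, 0, 0,
                d_inPixels, width, height, d_filter, filterWidth, d_outPixels);
        CHECK(hipGetLastError());
        CHECK(hipMemcpy(outPixels, d_outPixels, pixelsBytes, hipMemcpyDeviceToHost));
        CHECK(hipFree(d_inPixels));
        CHECK(hipFree(d_outPixels));
        CHECK(hipFree(d_filter));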
}
timer.Stop();
float time = timer.Elapsed();
printf("Processing time (%s): %f ms\n\n",
useDevice == true? "use device" : "use host", time);
}
float computeError(uchar3 * a1, uchar3 * a2, int n)
{
float err = 0;
for (int i = 0; i < n; i++)
{
err += abs((int)a1[i].x - (int)a2[i].x);
err += abs((int)a1[i].y - (int)a2[i].y);
err += abs((int)a1[i].z - (int)a2[i].z);
}
err /= (n * 3);
return err;
}
char * concatStr(const char * s1, const char * s2)
{
char * result = (char *)malloc(strlen(s1) + strlen(s2) + 1);
strcpy(result, s1);
strcat(result, s2);
return result;
}
int main(int argc, char ** argv)
{
if (argc !=3 && argc != 5)
{
printf("The number of arguments is invalid\n");
return EXIT_FAILURE;
}
// Read input image file
int width, height;
uchar3 * inPixels;
readPnm(argv[1], width, height, inPixels);
printf("Image size (width x height): %i x %i\n\n", width, height);
// Set up a simple filter with blurring effect
int filterWidth = 9;
float * filter = (float *)malloc(filterWidth * filterWidth * sizeof(float));
for (int filterR = 0; filterR < filterWidth; filterR++)
{
for (int filterC = 0; filterC < filterWidth; filterC++)
{
filter[filterR * filterWidth + filterC] = 1. / (filterWidth * filterWidth);
}
}
// Blur input image not using device
uchar3 * correctOutPixels= (uchar3 *)malloc(width * height * sizeof(uchar3));
blurImg(inPixels, width, height, filter, filterWidth, correctOutPixels);
// Blur input image using device
uchar3 * outPixels= (uchar3 *)malloc(width * height * sizeof(uchar3));
dim3 blockSize(32, 32); // Default
if (argc == 5)
{
blockSize.x = atoi(argv[3]);
blockSize.y = atoi(argv[4]);
}
blurImg(inPixels, width, height, filter, filterWidth, outPixels, true, blockSize);
// Compute mean absolute error between host result and device result
float err = computeError(outPixels, correctOutPixels, width * height);
printf("Error between device result and host result: %f\n", err);
// Write results to files
char * outFileNameBase = strtok(argv[2], "."); // Get rid of extension
writePnm(correctOutPixels, width, height, concatStr(outFileNameBase, "_host.pnm"));
writePnm(outPixels, width, height, concatStr(outFileNameBase, "_device.pnm"));
// Free memories
free(inPixels);
free(outPixels);
free(filter);
} | a58506c89a97dc796b12af83b84402d13310dc0b.cu | #include <stdio.h>
#include <stdint.h>
#define CHECK(call) \
{ \
const cudaError_t error = call; \
if (error != cudaSuccess) \
{ \
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__); \
fprintf(stderr, "code: %d, reason: %s\n", error, \
cudaGetErrorString(error)); \
exit(EXIT_FAILURE); \
} \
}
struct GpuTimer
{
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer()
{
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer()
{
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void Start()
{
cudaEventRecord(start, 0);
}
void Stop()
{
cudaEventRecord(stop, 0);
}
float Elapsed()
{
float elapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed, start, stop);
return elapsed;
}
};
void readPnm(char * fileName, int &width, int &height, uchar3 * &pixels)
{
FILE * f = fopen(fileName, "r");
if (f == NULL)
{
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
char type[3];
fscanf(f, "%s", type);
if (strcmp(type, "P3") != 0) // In this exercise, we don't touch other types
{
fclose(f);
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
fscanf(f, "%i", &width);
fscanf(f, "%i", &height);
int max_val;
fscanf(f, "%i", &max_val);
if (max_val > 255) // In this exercise, we assume 1 byte per value
{
fclose(f);
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
pixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
for (int i = 0; i < width * height; i++)
fscanf(f, "%hhu%hhu%hhu", &pixels[i].x, &pixels[i].y, &pixels[i].z);
fclose(f);
}
void writePnm(uchar3 * pixels, int width, int height, char * fileName)
{
FILE * f = fopen(fileName, "w");
if (f == NULL)
{
printf("Cannot write %s\n", fileName);
exit(EXIT_FAILURE);
}
fprintf(f, "P3\n%i\n%i\n255\n", width, height);
for (int i = 0; i < width * height; i++)
fprintf(f, "%hhu\n%hhu\n%hhu\n", pixels[i].x, pixels[i].y, pixels[i].z);
fclose(f);
}
__global__ void blurImgKernel(uchar3 * inPixels, int width, int height,
float * filter, int filterWidth,
uchar3 * outPixels)
{
// TODO
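    // Illustrative completion of the TODO above (a minimal sketch, not part of
    // the original skeleton): one thread computes one output pixel, using the
    // same clamp-to-edge convolution as the host reference implementation below.
    int c = blockIdx.x * blockDim.x + threadIdx.x;
    int r = blockIdx.y * blockDim.y + threadIdx.y;
    if (r < height && c < width)
    {
        float3 outPixel = make_float3(0, 0, 0);
        for (int filterR = 0; filterR < filterWidth; filterR++)
        {
            for (int filterC = 0; filterC < filterWidth; filterC++)
            {
                float filterVal = filter[filterR * filterWidth + filterC];
                int inR = min(height - 1, max(0, r - filterWidth / 2 + filterR));
                int inC = min(width - 1, max(0, c - filterWidth / 2 + filterC));
                uchar3 inPixel = inPixels[inR * width + inC];
                outPixel.x += filterVal * inPixel.x;
                outPixel.y += filterVal * inPixel.y;
                outPixel.z += filterVal * inPixel.z;
            }
        }
        outPixels[r * width + c] = make_uchar3(outPixel.x, outPixel.y, outPixel.z);
    }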
}
void blurImg(uchar3 * inPixels, int width, int height, float * filter, int filterWidth,
uchar3 * outPixels,
bool useDevice=false, dim3 blockSize=dim3(1, 1))
{
GpuTimer timer;
timer.Start();
if (useDevice == false)
{
for (int outPixelsR = 0; outPixelsR < height; outPixelsR++)
{
for (int outPixelsC = 0; outPixelsC < width; outPixelsC++)
{
float3 outPixel = make_float3(0, 0, 0);
for (int filterR = 0; filterR < filterWidth; filterR++)
{
for (int filterC = 0; filterC < filterWidth; filterC++)
{
float filterVal = filter[filterR * filterWidth + filterC];
int inPixelsR = (outPixelsR - filterWidth/2) + filterR;
int inPixelsC = (outPixelsC - filterWidth/2) + filterC;
inPixelsR = min(height - 1, max(0, inPixelsR));
inPixelsC = min(width - 1, max(0, inPixelsC));
uchar3 inPixel = inPixels[inPixelsR * width + inPixelsC];
outPixel.x += filterVal * inPixel.x;
outPixel.y += filterVal * inPixel.y;
outPixel.z += filterVal * inPixel.z;
}
}
outPixels[outPixelsR * width + outPixelsC] = make_uchar3(outPixel.x, outPixel.y, outPixel.z);
}
}
}
else // Use device
{
cudaDeviceProp devProp;
cudaGetDeviceProperties(&devProp, 0);
printf("GPU name: %s\n", devProp.name);
printf("GPU compute capability: %d.%d\n", devProp.major, devProp.minor);
// TODO
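        // Illustrative completion of the TODO above (a minimal sketch, not part
        // of the original skeleton): copy the image and filter to the device,
        // launch blurImgKernel over a 2D grid, and copy the blurred image back.
        uchar3 *d_inPixels, *d_outPixels;
        float *d_filter;
        size_t pixelsBytes = width * height * sizeof(uchar3);
        size_t filterBytes = filterWidth * filterWidth * sizeof(float);
        CHECK(cudaMalloc((void **)&d_inPixels, pixelsBytes));
        CHECK(cudaMalloc((void **)&d_outPixels, pixelsBytes));
        CHECK(cudaMalloc((void **)&d_filter, filterBytes));
        CHECK(cudaMemcpy(d_inPixels, inPixels, pixelsBytes, cudaMemcpyHostToDevice));
        CHECK(cudaMemcpy(d_filter, filter, filterBytes, cudaMemcpyHostToDevice));
        dim3 gridSize((width - 1) / blockSize.x + 1, (height - 1) / blockSize.y + 1);
        blurImgKernel<<<gridSize, blockSize>>>(d_inPixels, width, height,
                d_filter, filterWidth, d_outPixels);
        CHECK(cudaGetLastError());
        CHECK(cudaMemcpy(outPixels, d_outPixels, pixelsBytes, cudaMemcpyDeviceToHost));
        CHECK(cudaFree(d_inPixels));
        CHECK(cudaFree(d_outPixels));
        CHECK(cudaFree(d_filter));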
}
timer.Stop();
float time = timer.Elapsed();
printf("Processing time (%s): %f ms\n\n",
useDevice == true? "use device" : "use host", time);
}
float computeError(uchar3 * a1, uchar3 * a2, int n)
{
float err = 0;
for (int i = 0; i < n; i++)
{
err += abs((int)a1[i].x - (int)a2[i].x);
err += abs((int)a1[i].y - (int)a2[i].y);
err += abs((int)a1[i].z - (int)a2[i].z);
}
err /= (n * 3);
return err;
}
char * concatStr(const char * s1, const char * s2)
{
char * result = (char *)malloc(strlen(s1) + strlen(s2) + 1);
strcpy(result, s1);
strcat(result, s2);
return result;
}
int main(int argc, char ** argv)
{
if (argc !=3 && argc != 5)
{
printf("The number of arguments is invalid\n");
return EXIT_FAILURE;
}
// Read input image file
int width, height;
uchar3 * inPixels;
readPnm(argv[1], width, height, inPixels);
printf("Image size (width x height): %i x %i\n\n", width, height);
// Set up a simple filter with blurring effect
int filterWidth = 9;
float * filter = (float *)malloc(filterWidth * filterWidth * sizeof(float));
for (int filterR = 0; filterR < filterWidth; filterR++)
{
for (int filterC = 0; filterC < filterWidth; filterC++)
{
filter[filterR * filterWidth + filterC] = 1. / (filterWidth * filterWidth);
}
}
// Blur input image not using device
uchar3 * correctOutPixels= (uchar3 *)malloc(width * height * sizeof(uchar3));
blurImg(inPixels, width, height, filter, filterWidth, correctOutPixels);
// Blur input image using device
uchar3 * outPixels= (uchar3 *)malloc(width * height * sizeof(uchar3));
dim3 blockSize(32, 32); // Default
if (argc == 5)
{
blockSize.x = atoi(argv[3]);
blockSize.y = atoi(argv[4]);
}
blurImg(inPixels, width, height, filter, filterWidth, outPixels, true, blockSize);
// Compute mean absolute error between host result and device result
float err = computeError(outPixels, correctOutPixels, width * height);
printf("Error between device result and host result: %f\n", err);
// Write results to files
char * outFileNameBase = strtok(argv[2], "."); // Get rid of extension
writePnm(correctOutPixels, width, height, concatStr(outFileNameBase, "_host.pnm"));
writePnm(outPixels, width, height, concatStr(outFileNameBase, "_device.pnm"));
// Free memories
free(inPixels);
free(outPixels);
free(filter);
} |
b833c81bdf6312dc74543661493e94f1de34ef37.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2020 NVIDIA CORPORATION.
* Copyright (c) 2018-2020 Chris Choy ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "coordinate_map_functors.cuh"
#include "coordinate_map_gpu.cuh"
#include "gpu.cuh"
#include "kernel_map.cuh"
#include "kernel_map.hpp"
#include "sharedmem.cuh"
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/sort.h>
namespace minkowski {
namespace detail {
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void
remap_inverse_map(map_type __restrict__ map, //
coordinate_type const *__restrict__ coordinates, //
index_type *__restrict__ inverse_map, //
size_type const num_threads, //
size_type const coordinate_size //
) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
auto result = map.find(
coordinate<coordinate_type>{&coordinates[x * coordinate_size]});
inverse_map[x] = result->second;
}
}
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void
insert_and_map_kernel(map_type __restrict__ map, //
coordinate_type const *__restrict__ coordinates, //
index_type *__restrict__ valid_map_index, //
index_type *__restrict__ valid_row_index, //
size_type const num_threads, //
size_type const coordinate_size, //
index_type const unused_key) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
// Returns pair<iterator, (bool)insert_success>
auto const result = map.insert(thrust::make_pair(
coordinate<coordinate_type>{&coordinates[x * coordinate_size]}, x));
// auto test = &coordinates[x * coordinate_size];
if (result.second) {
valid_row_index[x] = x;
// success map index. remove failed insertion with success.
valid_map_index[x] = result.first.offset();
} else {
valid_map_index[x] = unused_key;
}
}
}
} // namespace detail
/*
* Field Map
*/
namespace detail {
template <typename coordinate_field_type, typename coordinate_int_type,
typename index_type, bool stride_one>
__global__ void quantize_coordinates_kernel(
coordinate_field_type const *__restrict__ p_tfield, //
coordinate_int_type *__restrict__ p_stensor, //
index_type const *__restrict__ p_tensor_stride, //
index_type const num_threads, index_type const coordinate_size) {
// coordinate_size * sizeof(index_type) + coordinate_size * sizeof(float_type)
// + THREADS * coordinate_size * sizeof(coordinate_type)
extern __shared__ index_type sh_tensor_stride[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (stride_one) {
if (x < num_threads) {
if (x % coordinate_size == 0)
p_stensor[x] = lrint(p_tfield[x]);
else
p_stensor[x] = floor(p_tfield[x]);
}
} else {
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
sh_tensor_stride[i] = p_tensor_stride[i];
}
__syncthreads();
if (x < num_threads) {
// batch index
if (x % coordinate_size == 0)
p_stensor[x] = lrint(p_tfield[x]);
else {
index_type curr_tensor_stride =
sh_tensor_stride[((x - 1) % coordinate_size)];
p_stensor[x] =
floor(p_tfield[x] / curr_tensor_stride) * curr_tensor_stride;
}
}
}
}
} // namespace detail
template <typename coordinate_field_type, typename coordinate_int_type,
template <typename T> class TemplatedAllocator>
void CoordinateFieldMapGPU<coordinate_field_type, coordinate_int_type,
TemplatedAllocator>::
quantize_coordinates(coordinate_int_type *d_dst_coordinates,
stride_type const &tensor_stride) const {
int64_t const stride_prod = std::accumulate(
tensor_stride.begin(), tensor_stride.end(), 1, std::multiplies<>());
// Copy tensor_stride to device
index_type *d_tensor_stride = reinterpret_cast<index_type *>(
m_byte_allocator.allocate(m_coordinate_size * sizeof(index_type)));
CUDA_CHECK(hipMemcpy(
d_tensor_stride, // dst
tensor_stride.data(), // first element of the dereferenced iter.
sizeof(index_type) * m_coordinate_size, // bytes
hipMemcpyHostToDevice));
size_type const num_threads = size() * m_coordinate_size;
auto const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS);
if (stride_prod == 1) {
hipLaunchKernelGGL(( detail::quantize_coordinates_kernel<coordinate_field_type,
coordinate_int_type, index_type, true>)
, dim3(num_blocks), dim3(CUDA_NUM_THREADS),
m_coordinate_size * sizeof(index_type), 0,
const_coordinate_data(), d_dst_coordinates, d_tensor_stride,
num_threads, m_coordinate_size);
} else {
hipLaunchKernelGGL(( detail::quantize_coordinates_kernel<coordinate_field_type,
coordinate_int_type, index_type, false>)
, dim3(num_blocks), dim3(CUDA_NUM_THREADS),
m_coordinate_size * sizeof(index_type), 0,
const_coordinate_data(), d_dst_coordinates, d_tensor_stride,
num_threads, m_coordinate_size);
}
}
/*
* @brief Given a key iterator begin-end pair and a value iterator begin-end
* pair, insert all elements.
*
* @note The key and value iterators can be 1) pointers, 2) coordinate or vector
* iterators.
*
* @return none
*/
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
template <bool remap>
void CoordinateMapGPU<coordinate_type, TemplatedAllocator>::insert(
coordinate_iterator<coordinate_type> key_first,
coordinate_iterator<coordinate_type> key_last) {
size_type const N = key_last - key_first;
LOG_DEBUG("key iterator length", N);
if (N == 0) {
m_size = 0;
return;
}
m_valid_row_index.allocate(N);
m_valid_map_index.allocate(N);
// Copy the coordinates to m_coordinate
base_type::reserve(N);
CUDA_CHECK(
hipMemcpy(coordinate_data(), // dst
key_first->data(), // first element of the dereferenced iter.
sizeof(coordinate_type) * N * m_coordinate_size, // bytes
hipMemcpyDeviceToDevice));
CUDA_CHECK(hipDeviceSynchronize());
LOG_DEBUG("Reserved and copiedm", N, "x", m_coordinate_size, "coordinates");
// compute cuda kernel call params
size_type const num_threads = N;
LOG_DEBUG("nm_threads", num_threads);
size_type const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS);
LOG_DEBUG("nm_blocks", num_blocks);
index_type const unused_key = std::numeric_limits<index_type>::max();
LOG_DEBUG("unused_key", unused_key);
hipLaunchKernelGGL(( detail::insert_and_map_kernel<coordinate_type, size_type, index_type,
map_type>), dim3(num_blocks), dim3(CUDA_NUM_THREADS), 0, 0,
*m_map, //
const_coordinate_data(), //
m_valid_map_index.data(), //
m_valid_row_index.data(), //
num_threads, m_coordinate_size, unused_key);
CUDA_CHECK(hipStreamSynchronize(0));
LOG_DEBUG("Map size:", m_map->size());
// Valid row index
auto valid_begin = thrust::make_zip_iterator(
thrust::make_tuple(m_valid_map_index.begin(), m_valid_row_index.begin()));
size_type const number_of_valid =
thrust::remove_if(thrust::device, valid_begin,
thrust::make_zip_iterator(thrust::make_tuple(
m_valid_map_index.end(), m_valid_row_index.end())),
detail::is_first<index_type>(unused_key)) -
valid_begin;
m_valid_row_index.resize(number_of_valid);
m_valid_map_index.resize(number_of_valid);
m_size = number_of_valid;
LOG_DEBUG("Number of successful insertion", m_size);
if (remap // When remapping
&& number_of_valid != N // when the # of inserted items differ from the #
// of successful insertions
) {
m_inverse_row_index.allocate(N);
thrust::counting_iterator<uint32_t> count_begin{0};
thrust::for_each(count_begin, count_begin + number_of_valid,
detail::update_value_with_offset<index_type, map_type>{
*m_map, m_valid_map_index.data()});
size_type const num_threads = N;
auto const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS);
hipLaunchKernelGGL(( detail::remap_inverse_map<coordinate_type, size_type, index_type, map_type>)
, dim3(num_blocks), dim3(CUDA_NUM_THREADS), 0, 0, *m_map, //
const_coordinate_data(), //
m_inverse_row_index.data(), //
num_threads, m_coordinate_size);
LOG_DEBUG("Remapping finished");
}
} // CoordinateMapGPU::insert
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
template <bool remap>
std::pair<gpu_storage<default_types::index_type, TemplatedAllocator<char>>,
gpu_storage<default_types::index_type, TemplatedAllocator<char>>>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::insert_and_map(
coordinate_iterator<coordinate_type> key_first,
coordinate_iterator<coordinate_type> key_last) {
LOG_DEBUG("insert_and_map");
insert<remap>(key_first, key_last);
return std::make_pair(m_valid_row_index, m_inverse_row_index);
}
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
void CoordinateMapGPU<coordinate_type, TemplatedAllocator>::
initialize_valid_indices(size_t const N_unique) {
m_valid_row_index.resize(N_unique);
m_valid_map_index.resize(N_unique);
m_size = N_unique;
// Insert coordinates
auto insert = detail::insert_coordinate<coordinate_type, map_type,
index_type *>{
*m_map, // map
const_coordinate_data(), // coordinates,
m_valid_row_index.data(), // valid row
m_valid_map_index.data(), // iter offset
m_coordinate_size};
thrust::counting_iterator<uint32_t> count_begin{0};
thrust::for_each(thrust::device, count_begin, count_begin + N_unique, insert);
}
/*
* @brief given a key iterator begin-end pair find all valid keys and its
* index.
*
* @return a pair of (valid index, query value) vectors.
*/
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
std::pair<gpu_storage<default_types::index_type, TemplatedAllocator<char>>,
gpu_storage<default_types::index_type, TemplatedAllocator<char>>>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::find(
coordinate_iterator<coordinate_type> key_first,
coordinate_iterator<coordinate_type> key_last) const {
size_type N = key_last - key_first;
LOG_DEBUG(N, "queries for find.");
auto const find_functor = detail::find_coordinate<coordinate_type, map_type>(
*m_map, key_first->data(), m_unused_element, m_coordinate_size);
LOG_DEBUG("Find functor initialized.");
auto const invalid_functor =
detail::is_unused_pair<coordinate_type, mapped_type>(m_unused_element);
LOG_DEBUG("Valid functor initialized.");
thrust::counting_iterator<index_type> index{0};
gpu_storage<index_type, byte_allocator_type> input_index(N);
gpu_storage<index_type, byte_allocator_type> results(N);
LOG_DEBUG("Initialized functors.");
thrust::sequence(thrust::device, input_index.begin(), input_index.end());
thrust::transform(thrust::device, index, index + N, results.begin(),
find_functor);
size_type const number_of_valid =
thrust::remove_if(thrust::device,
thrust::make_zip_iterator(thrust::make_tuple(
input_index.begin(), results.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
input_index.end(), results.end())),
invalid_functor) -
thrust::make_zip_iterator(
thrust::make_tuple(input_index.begin(), results.begin()));
LOG_DEBUG("Number of valid", number_of_valid);
input_index.resize(number_of_valid);
results.resize(number_of_valid);
return std::make_pair(input_index, results);
}
namespace detail {
template <typename coordinate_type, //
typename size_type, //
typename index_type>
__global__ void
stride_copy(coordinate_type const *__restrict__ src_coordinates, //
index_type const *__restrict__ src_valid_row_index, //
index_type const *__restrict__ stride, //
coordinate_type *__restrict__ dst_coordinates, //
size_type const num_threads, size_type const coordinate_size) {
extern __shared__ size_type sh_stride[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x)
sh_stride[i] = stride[i];
__syncthreads();
if (x < num_threads) {
const index_type src_start = src_valid_row_index[x] * coordinate_size;
const index_type dst_start = x * coordinate_size;
dst_coordinates[dst_start] = src_coordinates[src_start];
for (index_type j = 1; j < coordinate_size; ++j) {
dst_coordinates[dst_start + j] =
(__float2int_rd(
__fdiv_rd(src_coordinates[src_start + j], sh_stride[j - 1]))) *
sh_stride[j - 1];
// (__double2int_rd(
// __ddiv_rn(src_coordinates[src_start + j], sh_stride[j - 1]))) *
// sh_stride[j - 1];
}
}
}
} // namespace detail
/*
* @brief given a key iterator begin-end pair find all valid keys and its
* index.
*
* @return a pair of (valid index, query value) vectors.
*/
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::stride(
stride_type const &stride) const {
// Over estimate the reserve size to be size();
size_type const N = size();
LOG_DEBUG("Strided map with kernel stride:", stride);
self_type stride_map(
N, m_coordinate_size, m_hashtable_occupancy,
detail::stride_tensor_stride(base_type::m_tensor_stride, stride),
m_map_allocator, base_type::m_byte_allocator);
index_storage_type out_device_tensor_stride(stride_map.get_tensor_stride());
// stride coordinates
size_type const num_threads = N;
auto const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS);
hipLaunchKernelGGL(( detail::stride_copy<coordinate_type, size_type, index_type>)
, dim3(num_blocks), dim3(CUDA_NUM_THREADS), m_coordinate_size * sizeof(size_type), 0,
const_coordinate_data(), //
m_valid_row_index.cbegin(), //
out_device_tensor_stride.cbegin(), //
stride_map.coordinate_data(), //
num_threads, m_coordinate_size);
LOG_DEBUG("Stride copy done.");
auto &stride_valid_row_index = stride_map.m_valid_row_index;
auto &stride_valid_map_index = stride_map.m_valid_map_index;
stride_valid_row_index.resize(N); // row indices
stride_valid_map_index.resize(N); // map offset
// Insert coordinates
index_type const unused_key = std::numeric_limits<index_type>::max();
LOG_DEBUG("unused_key", unused_key);
hipLaunchKernelGGL(( detail::insert_and_map_kernel<coordinate_type, size_type, index_type,
map_type>), dim3(num_blocks), dim3(CUDA_NUM_THREADS), 0, 0,
*stride_map.m_map, //
stride_map.const_coordinate_data(), //
stride_valid_map_index.data(), //
stride_valid_row_index.data(), //
num_threads, m_coordinate_size, unused_key);
CUDA_CHECK(hipStreamSynchronize(0));
LOG_DEBUG("Stride map insertion complete");
// Valid row index
auto valid_begin = thrust::make_zip_iterator(
thrust::make_tuple(stride_valid_map_index.begin(), //
stride_valid_row_index.begin()));
size_type const number_of_valid =
thrust::remove_if(thrust::device, //
valid_begin, //
thrust::make_zip_iterator(
thrust::make_tuple(stride_valid_map_index.end(), //
stride_valid_row_index.end())),
detail::is_first<index_type>(unused_key)) -
valid_begin;
stride_valid_row_index.resize(number_of_valid);
stride_valid_map_index.resize(number_of_valid);
stride_map.m_size = number_of_valid;
LOG_DEBUG("Reduced to", number_of_valid);
// remap values
thrust::counting_iterator<uint32_t> count_begin{0};
thrust::for_each(count_begin, count_begin + number_of_valid,
detail::update_value_with_offset<index_type, map_type>{
*stride_map.m_map, stride_map.m_valid_map_index.data()});
LOG_DEBUG("Stride remap done");
return stride_map;
}
namespace detail {
template <typename coordinate_type, typename index_type>
__device__ bool is_coordinate_aligned(coordinate_type *point,
index_type *out_tensor_stride,
uint32_t const size) {
for (uint32_t i = 0; i < size - 1; ++i) {
if (point[i + 1] % out_tensor_stride[i] != 0)
return false;
}
return true;
}
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void kernel_region_insert(
size_type const num_threads, //
map_type __restrict__ out_map, //
coordinate_type const *const __restrict__ p_in_coordinates, //
index_type const *const __restrict__ in_valid_row_index, //
coordinate_type *__restrict__ p_out_coordinates, //
index_type *__restrict__ out_valid_row_index, //
index_type *__restrict__ out_valid_map_index, //
gpu_kernel_region<coordinate_type> kernel, //
size_type const *const __restrict__ out_tensor_stride, //
index_type const unused_key) { //
extern __shared__ coordinate_type sh_all[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
size_type const coordinate_size = kernel.coordinate_size();
size_type const volume = kernel.volume();
// clang-format off
size_type *sh_size = reinterpret_cast<size_type *>(sh_all);
size_type *sh_tensor_stride = sh_size;
size_type *sh_kernel_size = sh_tensor_stride + coordinate_size;
size_type *sh_dilation = sh_kernel_size + coordinate_size;
size_type *sh_out_tensor_stride = sh_dilation + coordinate_size;
coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_out_tensor_stride + coordinate_size);
coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
// clang-format on
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
sh_tensor_stride[i] = kernel.tensor_stride()[i];
sh_kernel_size[i] = kernel.kernel_size()[i];
sh_dilation[i] = kernel.dilation()[i];
sh_out_tensor_stride[i] = out_tensor_stride[i];
}
__syncthreads();
auto sh_kernel = gpu_kernel_region<coordinate_type>(
kernel, sh_tensor_stride, sh_kernel_size, sh_dilation);
coordinate<coordinate_type> curr_coordinate(sh_tmp);
if (x < num_threads) {
// iterate over values
index_type out_index = x * volume;
// set bounds for the valid keys
for (uint32_t kernel_ind = 0; kernel_ind < volume; ++kernel_ind) {
sh_kernel.coordinate_at(
kernel_ind,
&p_in_coordinates[in_valid_row_index[x] * coordinate_size], sh_tmp);
// Creating generative conv transpose
if (kernel.is_transpose()) {
// initialize out coordinate
for (uint32_t i = 0; i < coordinate_size; ++i)
p_out_coordinates[out_index * coordinate_size + i] =
curr_coordinate[i];
auto const result = out_map.insert(thrust::make_pair(
coordinate<coordinate_type>{
&p_out_coordinates[out_index * coordinate_size]},
out_index));
if (result.second) {
// row index in the out_coordinates
out_valid_row_index[out_index] = out_index;
// offset in the coordinate map
out_valid_map_index[out_index] = result.first.offset();
} else {
out_valid_row_index[out_index] = unused_key;
}
++out_index;
} else {
// skip if the coordinate is not aligned
if (!is_coordinate_aligned(sh_tmp, sh_out_tensor_stride,
coordinate_size)) {
out_valid_row_index[out_index] = unused_key;
++out_index;
} else {
// initialize out coordinate
for (uint32_t i = 0; i < coordinate_size; ++i)
p_out_coordinates[out_index * coordinate_size + i] =
curr_coordinate[i];
auto const result = out_map.insert(thrust::make_pair(
coordinate<coordinate_type>{
&p_out_coordinates[out_index * coordinate_size]},
out_index));
if (result.second) {
// row index in the out_coordinates
out_valid_row_index[out_index] = out_index;
// offset in the coordinate map
out_valid_map_index[out_index] = result.first.offset();
} else {
out_valid_row_index[out_index] = unused_key;
}
++out_index;
}
}
}
}
}
} // namespace detail
/*
* @brief generate a region strided coordinate map
*
* @return a gpu_coordinate_map
*/
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::stride_region(
cpu_kernel_region<coordinate_type> &kernel,
stride_type const &out_tensor_stride) const {
ASSERT(m_coordinate_size == kernel.coordinate_size(),
"Invalid kernel coordinate_size");
gpu_kernel_region<coordinate_type> gpu_kernel(kernel.to_gpu());
// Over estimate the reserve size to be size();
size_type const N_in = size();
size_type const N_out = N_in * kernel.volume();
LOG_DEBUG("Stride region out tensor stride:", out_tensor_stride,
"with capacity:", N_out);
self_type stride_map(N_out, m_coordinate_size, m_hashtable_occupancy,
out_tensor_stride, m_map_allocator,
base_type::m_byte_allocator);
index_storage_type d_out_tensor_stride(out_tensor_stride);
auto &out_valid_row_index = stride_map.m_valid_row_index;
auto &out_valid_map_index = stride_map.m_valid_map_index;
out_valid_row_index.resize(N_out);
out_valid_map_index.resize(N_out);
index_type const unused_key = std::numeric_limits<index_type>::max();
// (THREAD * D + 3 * D) * 4
uint32_t const shared_memory_size_in_bytes =
4 * m_coordinate_size * sizeof(index_type) + // stride, kernel, dilation
CUDA_NUM_THREADS * m_coordinate_size * sizeof(coordinate_type); // tmp
hipLaunchKernelGGL(( detail::kernel_region_insert<coordinate_type, size_type, index_type, map_type>)
, dim3(GET_BLOCKS(N_in, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS),
shared_memory_size_in_bytes, 0, N_in, //
*stride_map.m_map, //
const_coordinate_data(), //
m_valid_row_index.cbegin(), //
stride_map.coordinate_data(), //
out_valid_row_index.data(), //
out_valid_map_index.data(), //
gpu_kernel, //
d_out_tensor_stride.cbegin(), //
unused_key); //
CUDA_CHECK(hipStreamSynchronize(0));
LOG_DEBUG("kernel_region_insert done");
// LOG_DEBUG("valid row index", out_valid_row_index);
// LOG_DEBUG("valid map offset", out_valid_map_index);
// remove unused_keys
auto valid_begin = thrust::make_zip_iterator(
thrust::make_tuple(out_valid_row_index.begin(), //
out_valid_map_index.begin()));
size_type const number_of_valid =
thrust::remove_if(thrust::device, //
valid_begin, //
thrust::make_zip_iterator(
thrust::make_tuple(out_valid_row_index.end(), //
out_valid_map_index.end())),
detail::is_first<index_type>(unused_key)) -
valid_begin;
out_valid_row_index.resize(number_of_valid);
out_valid_map_index.resize(number_of_valid);
stride_map.m_size = number_of_valid;
LOG_DEBUG("Reduced to", number_of_valid);
// remap values
thrust::counting_iterator<index_type> count_begin{0};
thrust::for_each(count_begin, count_begin + number_of_valid,
detail::update_value_with_offset<index_type, map_type>{
*stride_map.m_map, out_valid_map_index.data()});
LOG_DEBUG("Stride remap done");
return stride_map;
}
namespace detail {
template <typename dst_coordinate_type, typename src_coordinate_type,
typename size_type, typename index_type, bool stride_src>
__global__ void copy_column_with_valid(
dst_coordinate_type *__restrict__ dst_coordinates, //
size_type const num_threads, //
src_coordinate_type const *__restrict__ src_coordinates, //
index_type const *__restrict__ src_valid_row_index, //
size_type const coordinate_size) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
if (stride_src)
dst_coordinates[x] =
src_coordinates[src_valid_row_index[x] * coordinate_size];
else
dst_coordinates[x * coordinate_size] =
src_coordinates[src_valid_row_index[x]];
}
}
template <typename dst_coordinate_type, typename src_coordinate_type,
typename size_type, bool stride_src>
__global__ void
copy_column(dst_coordinate_type *__restrict__ dst_coordinates, //
size_type const num_threads, //
src_coordinate_type const *__restrict__ src_coordinates, //
size_type const coordinate_size) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
if (stride_src)
dst_coordinates[x] = src_coordinates[x * coordinate_size];
else
dst_coordinates[x * coordinate_size] = src_coordinates[x];
}
}
} // namespace detail
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::origin() const {
size_type const N = size();
LOG_DEBUG("Origin map from in map size:", N);
// tensor stride is set to {0,..., 0} for the origin map.
stride_type origin_tensor_stride(m_coordinate_size - 1);
std::for_each(origin_tensor_stride.begin(), origin_tensor_stride.end(),
[](auto &i) { i = 0; });
// thrust unique for unique batch index
coordinate_type *d_batch_indices = reinterpret_cast<coordinate_type *>(
m_byte_allocator.allocate(N * sizeof(coordinate_type)));
hipLaunchKernelGGL(( detail::copy_column_with_valid<coordinate_type, coordinate_type, size_type,
index_type, true>)
, dim3(GET_BLOCKS(N, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, 0,
d_batch_indices, N, const_coordinate_data(),
m_valid_row_index.cbegin(), m_coordinate_size);
#ifdef DEBUG
CUDA_CHECK(hipStreamSynchronize(0));
LOG_DEBUG("copied batch indices");
#endif
// Sort and unique
thrust::sort(thrust::device, d_batch_indices, d_batch_indices + N);
#ifdef DEBUG
CUDA_CHECK(hipStreamSynchronize(0));
LOG_DEBUG("sorted batch indices");
#endif
auto d_batch_indices_end =
thrust::unique(thrust::device, d_batch_indices, d_batch_indices + N);
size_type const N_unique = d_batch_indices_end - d_batch_indices;
#ifdef DEBUG
size_t Nsize = std::min<int>(N_unique, 100);
std::vector<coordinate_type> tmp(Nsize);
CUDA_CHECK(hipMemcpy(tmp.data(), d_batch_indices,
Nsize * sizeof(coordinate_type),
hipMemcpyDeviceToHost));
LOG_DEBUG("sort and unique batch", tmp);
CUDA_CHECK(hipStreamSynchronize(0));
LOG_DEBUG("unique done");
#endif
// Create origin map
LOG_DEBUG("Origin map with size:", N_unique,
" tensor stride:", origin_tensor_stride);
self_type origin_map(N_unique, m_coordinate_size, m_hashtable_occupancy,
origin_tensor_stride, m_map_allocator,
base_type::m_byte_allocator);
CUDA_CHECK(
hipMemset(origin_map.coordinate_data(), 0,
N_unique * m_coordinate_size * sizeof(coordinate_type)));
hipLaunchKernelGGL(( detail::copy_column<coordinate_type, coordinate_type, size_type, false>)
, dim3(GET_BLOCKS(N_unique, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, 0,
origin_map.coordinate_data(), N_unique, d_batch_indices,
m_coordinate_size);
#ifdef DEBUG
CUDA_CHECK(hipStreamSynchronize(0));
LOG_DEBUG("copied batch indices to the origin_map");
#endif
auto &origin_valid_row_index = origin_map.m_valid_row_index;
auto &origin_valid_map_index = origin_map.m_valid_map_index;
origin_valid_row_index.resize(N_unique);
origin_valid_map_index.resize(N_unique);
origin_map.m_size = N_unique;
// Insert coordinates
auto insert = detail::insert_coordinate<coordinate_type, map_type,
index_type *>{
*origin_map.m_map, // map
origin_map.const_coordinate_data(), // coordinates,
origin_valid_row_index.data(), // valid row
origin_valid_map_index.data(), // iter offset
m_coordinate_size};
thrust::counting_iterator<uint32_t> count_begin{0};
thrust::for_each(thrust::device, count_begin, count_begin + N_unique, insert);
#ifdef DEBUG
CUDA_CHECK(hipStreamSynchronize(0));
LOG_DEBUG("origin map insertion");
#endif
m_byte_allocator.deallocate((char *)d_batch_indices,
N * sizeof(coordinate_type));
return origin_map;
}
template <typename coordinate_type, typename coordinate_int_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_int_type, TemplatedAllocator>
CoordinateFieldMapGPU<coordinate_type, coordinate_int_type,
TemplatedAllocator>::origin() const {
size_type const N = size();
LOG_DEBUG("Origin map from in map size:", N);
// tensor stride is set to {0,..., 0} for the origin map.
stride_type origin_tensor_stride(m_coordinate_size - 1);
std::for_each(origin_tensor_stride.begin(), origin_tensor_stride.end(),
[](auto &i) { i = 0; });
// thrust unique for unique batch index
coordinate_int_type *d_batch_indices =
reinterpret_cast<coordinate_int_type *>(
m_byte_allocator.allocate(N * sizeof(coordinate_int_type)));
hipLaunchKernelGGL(( detail::copy_column<coordinate_int_type, coordinate_type, size_type, true>)
, dim3(GET_BLOCKS(N, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, 0,
d_batch_indices, N, const_coordinate_data(), m_coordinate_size);
// Sort and unique
thrust::sort(thrust::device, d_batch_indices, d_batch_indices + N);
auto d_batch_indices_end =
thrust::unique(thrust::device, d_batch_indices, d_batch_indices + N);
size_type const N_unique = d_batch_indices_end - d_batch_indices;
// Create origin map
LOG_DEBUG("Origin map with size:", N_unique,
" tensor stride:", origin_tensor_stride);
CoordinateMapGPU<coordinate_int_type, TemplatedAllocator> origin_map(
N_unique, m_coordinate_size, 50, origin_tensor_stride);
CUDA_CHECK(
hipMemset(origin_map.coordinate_data(), 0,
N_unique * m_coordinate_size * sizeof(coordinate_int_type)));
hipLaunchKernelGGL(( detail::copy_column<coordinate_int_type, coordinate_int_type, size_type,
false>)
, dim3(GET_BLOCKS(N_unique, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, 0,
origin_map.coordinate_data(), N_unique, d_batch_indices,
m_coordinate_size);
m_byte_allocator.deallocate((char *)d_batch_indices,
N * sizeof(coordinate_type));
origin_map.initialize_valid_indices(N_unique);
return origin_map;
}
namespace detail {
template <typename coordinate_field_type, //
typename coordinate_int_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void origin_field_map_kernel(
size_type const num_threads, //
coordinate_field_type const *__restrict__ d_field_coords, //
map_type const __restrict__ origin_map, //
index_type *__restrict__ p_in_maps, //
index_type *__restrict__ p_out_maps, //
index_type *__restrict__ p_kernels, //
size_type const coordinate_size) {
extern __shared__ coordinate_int_type sh_all[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
// clang-format off
coordinate_int_type *sh_tmp = sh_all + tx * coordinate_size;
// clang-format on
if (x < num_threads)
for (index_type i = 0; i < coordinate_size; ++i)
sh_tmp[i] = 0;
__syncthreads();
if (x < num_threads) {
sh_tmp[0] =
coordinate_int_type(lroundf(d_field_coords[x * coordinate_size]));
auto origin_iter = origin_map.find(coordinate<coordinate_int_type>(sh_tmp));
auto out_index = origin_iter->second;
p_in_maps[x] = x;
p_out_maps[x] = out_index; // origin_map row index
// For kernel_map decompose()
p_kernels[x] = out_index;
}
}
} // namespace detail
template <typename coordinate_field_type, typename coordinate_int_type,
template <typename T> class TemplatedAllocator>
CoordinateFieldMapGPU<coordinate_field_type, coordinate_int_type,
TemplatedAllocator>::kernel_map_type
CoordinateFieldMapGPU<coordinate_field_type, coordinate_int_type,
TemplatedAllocator>::
origin_map(CoordinateMapGPU<coordinate_int_type, TemplatedAllocator> const
&origin_map,
uint32_t thread_dim) const {
ASSERT(std::all_of(origin_map.get_tensor_stride().begin(),
origin_map.get_tensor_stride().end(),
[](auto const &i) { return i == 0; }),
"Invalid origin tensor stride", origin_map.get_tensor_stride());
// reserve size();
size_type const in_size = size();
LOG_DEBUG("in_map size:", in_size, "origin_map size:", origin_map.size());
// (THREAD * D) * 4
uint32_t const shared_memory_size_in_bytes =
thread_dim * m_coordinate_size * sizeof(coordinate_int_type); // tmp
size_type const num_threads = in_size;
auto const num_blocks = GET_BLOCKS(num_threads, thread_dim);
LOG_DEBUG("origin_map num block", num_blocks);
LOG_DEBUG("origin_map shared_memory size", shared_memory_size_in_bytes);
LOG_DEBUG("origin_map threads dim", thread_dim);
LOG_DEBUG("origin_map num threads", num_threads);
kernel_map_type kernel_map(in_size, base_type::m_byte_allocator);
CUDA_CHECK(hipStreamSynchronize(0));
LOG_DEBUG("Allocated kernel_map.");
hipLaunchKernelGGL(( detail::origin_field_map_kernel<coordinate_field_type, coordinate_int_type,
size_type, index_type, int_hash_map_type>)
, dim3(num_blocks), dim3(thread_dim), shared_memory_size_in_bytes, 0,
num_threads, //
const_coordinate_data(), //
origin_map.const_hash_map(), //
kernel_map.in_maps.begin(), //
kernel_map.out_maps.begin(), //
kernel_map.kernels.begin(), //
m_coordinate_size);
CUDA_CHECK(hipStreamSynchronize(0));
THRUST_CHECK(kernel_map.decompose());
LOG_DEBUG("origin map decomposed");
return kernel_map;
}
namespace detail {
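// Kernels used by CoordinateMapGPU::prune():
//  * typed_copy            - widens the bool keep mask to index_type so
//                            thrust::inclusive_scan can run on it.
//  * prune_copy_and_insert - for every kept row, copies its coordinate to the
//                            compacted output buffer and inserts it into the
//                            pruned hash map; dropped rows and failed
//                            insertions are tagged with unused_map_offset.
//  * remap                 - rewrites the stored map values to the compacted
//                            row order after thrust::remove_if.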
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void prune_copy_and_insert(
size_type const num_threads, //
size_type const coordinate_size, //
index_type const unused_map_offset, //
index_type const *const __restrict__ in_valid_row_index, //
coordinate_type const *const __restrict__ in_coordinates, //
bool const *const __restrict__ keep_begin, //
index_type const *const __restrict__ inclusive_scan_keep, //
map_type __restrict__ out_map, //
coordinate_type *__restrict__ out_coordinates, //
index_type *__restrict__ out_valid_row_index, //
index_type *__restrict__ out_valid_map_offset //
) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
if (!keep_begin[x]) {
out_valid_map_offset[x] = unused_map_offset;
} else {
// If keep,
auto out_row_index = (x < 1) ? 0 : inclusive_scan_keep[x - 1];
coordinate_type const *curr_in_coord =
&in_coordinates[in_valid_row_index[x] * coordinate_size];
coordinate_type *curr_out_coord =
&out_coordinates[out_row_index * coordinate_size];
for (index_type i = 0; i < coordinate_size; ++i)
curr_out_coord[i] = curr_in_coord[i];
// insert to the out_map
auto coord = coordinate<coordinate_type>{curr_out_coord};
// remap the value in the next kernel call
auto result = out_map.insert(thrust::make_pair(coord, 0));
out_valid_row_index[x] = out_row_index;
if (result.second)
out_valid_map_offset[x] = result.first.offset();
else
out_valid_map_offset[x] = unused_map_offset;
}
}
}
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void remap(size_type const num_threads, //
map_type const __restrict__ out_map, //
index_type *__restrict__ out_valid_map_offset //
) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
auto &pair = out_map.data()[out_valid_map_offset[x]];
pair.second = x;
}
}
template <typename Dtype, typename Stype>
__global__ void typed_copy(uint32_t const num_threads, //
Dtype *__restrict__ dst, //
Stype const *__restrict__ src //
) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
dst[x] = src[x];
}
}
} // namespace detail
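// prune(): returns a compacted copy of this map containing only the rows
// whose keep flag is true. Minimal usage sketch (illustrative only; `map` and
// `d_keep` are hypothetical names, with d_keep holding one device-resident
// bool per row of `map`):
//
//   bool const *d_keep = /* size() flags on the device */;
//   auto pruned = map.prune(d_keep, d_keep + map.size());
//   // pruned.size() == number of rows whose flag was true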
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::prune(
bool const *keep_begin, bool const *keep_end) const {
size_type const N = size();
ASSERT(N == keep_end - keep_begin, "Invalid keep size");
LOG_DEBUG("Prune size:", N);
  // inclusive scan over the keep flags; the kernel reads index x - 1 to obtain
  // the exclusive (output row) offset.
auto const inclusive_scan_size = N * sizeof(index_type);
index_type *d_inclusive_scan =
(index_type *)m_byte_allocator.allocate(inclusive_scan_size);
// bool -> index_type
hipLaunchKernelGGL(( detail::typed_copy), dim3(GET_BLOCKS(N, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, 0,
N, d_inclusive_scan, keep_begin);
CUDA_CHECK(hipStreamSynchronize(0));
thrust::inclusive_scan(thrust::device, d_inclusive_scan, d_inclusive_scan + N,
d_inclusive_scan);
index_type N_pruned;
CUDA_CHECK(hipMemcpy(&N_pruned, d_inclusive_scan + N - 1, sizeof(index_type),
hipMemcpyDeviceToHost));
LOG_DEBUG("Pruned N:", N_pruned);
// create a coordinate_map
self_type pruned_map(N, m_coordinate_size, m_hashtable_occupancy,
base_type::m_tensor_stride, m_map_allocator,
base_type::m_byte_allocator);
// Copy and insert kernel that first checks keep[i] is true and insert at
// inclusive_scan[i - 1].
auto &out_valid_map_offset = pruned_map.m_valid_map_index;
auto &out_valid_row_index = pruned_map.m_valid_row_index;
out_valid_map_offset.resize(N);
out_valid_row_index.resize(N);
index_type const unused_map_offset = std::numeric_limits<index_type>::max();
hipLaunchKernelGGL(( detail::prune_copy_and_insert<coordinate_type, size_type, index_type,
map_type>)
, dim3(GET_BLOCKS(N, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, 0,
N, m_coordinate_size, unused_map_offset, m_valid_row_index.cbegin(),
const_coordinate_data(), keep_begin, d_inclusive_scan,
*(pruned_map.m_map), pruned_map.coordinate_data(),
out_valid_row_index.data(), out_valid_map_offset.data());
CUDA_CHECK(hipStreamSynchronize(0));
LOG_DEBUG("Pruned hash map size:", pruned_map.size());
// Remove not inserted rows
auto valid_begin = thrust::make_zip_iterator(thrust::make_tuple(
out_valid_map_offset.begin(), out_valid_row_index.begin()));
size_type const number_of_valid =
thrust::remove_if(
thrust::device, valid_begin,
thrust::make_zip_iterator(thrust::make_tuple(
out_valid_map_offset.end(), out_valid_row_index.end())),
detail::is_first<index_type>(unused_map_offset)) -
valid_begin;
LOG_DEBUG("number of valid rows:", number_of_valid);
out_valid_map_offset.resize(number_of_valid);
out_valid_row_index.resize(number_of_valid);
pruned_map.m_size = number_of_valid;
// remap the final map values
hipLaunchKernelGGL(( detail::remap<coordinate_type, size_type, index_type, map_type>)
, dim3(GET_BLOCKS(number_of_valid, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, 0,
number_of_valid, *(pruned_map.m_map), out_valid_map_offset.data());
CUDA_CHECK(hipStreamSynchronize(0));
m_byte_allocator.deallocate((char *)d_inclusive_scan, inclusive_scan_size);
return pruned_map;
}
// Merge
namespace detail {
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void
copy_coordinates_by_offset(map_type __restrict__ map, //
coordinate_type *__restrict__ coordinates, //
index_type const *__restrict__ map_offsets, //
size_type const num_threads, //
size_type const coordinate_size //
) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
typename map_type::value_type const *p_value = map.data() + map_offsets[x];
// Compute Capabilities 3.5 or newer
coordinate_type *dst_coordinate =
coordinates + p_value->second * coordinate_size;
for (index_type i = 0; i < coordinate_size; ++i)
dst_coordinate[i] = p_value->first[i];
}
}
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void copy_coordinates_by_valid_row(
// map_type __restrict__ map, //
coordinate_type const *__restrict__ in_coordinates, //
coordinate_type *__restrict__ out_coordinates, //
index_type const *__restrict__ valid_row, //
size_type const num_threads, //
size_type const coordinate_size //
) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
// Compute Capabilities 3.5 or newer
index_type const row_index = x / coordinate_size;
index_type const col_index = x % coordinate_size;
out_coordinates[row_index * coordinate_size + col_index] =
in_coordinates[valid_row[row_index] * coordinate_size + col_index];
}
}
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void insert_and_map_kernel_with_offset(
map_type __restrict__ map, //
coordinate_type const *__restrict__ coordinates, //
index_type const coordinate_row_offset, //
index_type *__restrict__ valid_map_index, //
index_type *__restrict__ valid_row_index, //
size_type const num_threads, //
size_type const coordinate_size, //
index_type const unused_key) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
// m_map.insert(pair);
// Returns pair<iterator, (bool)insert_success>
auto const result = map.insert(thrust::make_pair(
coordinate<coordinate_type>{&coordinates[x * coordinate_size]}, x));
if (result.second) {
valid_row_index[x] = x + coordinate_row_offset;
// success map index. remove failed insertion with success.
valid_map_index[x] = result.first.offset();
} else {
valid_map_index[x] = unused_key;
}
}
}
} // namespace detail
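// merge(): concatenates the coordinates of all input maps into one buffer,
// inserts them into a single hash map (coordinates that already exist fail
// the insertion and are tagged with unused_key), compacts the surviving
// (map offset, row index) pairs with thrust::remove_if, and finally remaps
// the stored map values to the compacted row order.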
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::merge(
std::vector<std::reference_wrapper<self_type>> const &maps) const {
// reserve size
size_t all_size = std::accumulate(
maps.begin(), maps.end(), 0,
[](size_t sum, const self_type &map) { return sum + map.size(); });
LOG_DEBUG("Out merge map capacity:", all_size);
self_type merged_map(all_size, m_coordinate_size, m_hashtable_occupancy,
base_type::m_tensor_stride, m_map_allocator,
base_type::m_byte_allocator);
merged_map.m_valid_row_index.resize(all_size);
merged_map.m_valid_map_index.resize(all_size);
// Copy valid coordinates to the merged map
coordinate_type *curr_coordinates = merged_map.coordinate_data();
index_type *curr_valid_map_offset = merged_map.m_valid_map_index.data();
index_type *curr_valid_row_index = merged_map.m_valid_row_index.data();
index_type const unused_key = std::numeric_limits<index_type>::max();
index_type row_offset{0};
for (self_type const &map : maps) {
size_type const num_threads = map.size();
if (num_threads == 0)
continue;
size_type const num_blocks =
GET_BLOCKS(num_threads * m_coordinate_size, CUDA_NUM_THREADS);
LOG_DEBUG("Current merge map size:", num_threads);
hipLaunchKernelGGL(( detail::copy_coordinates_by_valid_row<coordinate_type, size_type,
index_type, map_type>)
, dim3(num_blocks), dim3(CUDA_NUM_THREADS), 0, 0, map.const_coordinate_data(), //
curr_coordinates, //
map.m_valid_row_index.cdata(), //
num_threads * m_coordinate_size, //
m_coordinate_size);
hipLaunchKernelGGL(( detail::insert_and_map_kernel_with_offset<coordinate_type, size_type,
index_type, map_type>)
, dim3(num_blocks), dim3(CUDA_NUM_THREADS), 0, 0, *(merged_map.m_map),
curr_coordinates, //
row_offset, //
curr_valid_map_offset, //
curr_valid_row_index, //
num_threads, m_coordinate_size,
unused_key);
CUDA_CHECK(hipStreamSynchronize(0));
curr_coordinates += num_threads * m_coordinate_size;
curr_valid_map_offset += num_threads;
curr_valid_row_index += num_threads;
row_offset += num_threads;
}
// Remove invalid maps
auto valid_begin = thrust::make_zip_iterator(
thrust::make_tuple(merged_map.m_valid_map_index.begin(),
merged_map.m_valid_row_index.begin()));
size_type const number_of_valid =
thrust::remove_if(thrust::device, valid_begin,
thrust::make_zip_iterator(thrust::make_tuple(
merged_map.m_valid_map_index.end(),
merged_map.m_valid_row_index.end())),
detail::is_first<index_type>(unused_key)) -
valid_begin;
// remap the final map row index and the map offset
hipLaunchKernelGGL(( detail::remap<coordinate_type, size_type, index_type, map_type>)
, dim3(GET_BLOCKS(number_of_valid, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS), 0, 0,
number_of_valid, *(merged_map.m_map),
merged_map.m_valid_map_index.data());
merged_map.m_valid_row_index.resize(number_of_valid);
merged_map.m_valid_map_index.resize(number_of_valid);
merged_map.m_size = number_of_valid;
return merged_map;
}
namespace detail {
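// Kernels used by CoordinateMapGPU::kernel_map(). Tensor stride, kernel size,
// and dilation are staged in shared memory, and each thread probes the input
// hash map with kernel-shifted coordinates of an output point.
//  * count_kernel      - first pass of the MEMORY_EFFICIENT path: counts how
//                        many kernel offsets of each output point hit the
//                        input map.
//  * preallocated_kernel_map_iteration - second pass: writes the
//                        (kernel, in, out) triplets at offsets given by the
//                        scanned per-thread counts.
//  * direct_in_out_map - kernel_volume == 1 shortcut: a single lookup per
//                        output point.
//  * direct_kernel_map - SPEED_OPTIMIZED path: one thread per
//                        (output point, kernel offset) pair; misses are
//                        tagged with unused_map_value and compacted later.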
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void
count_kernel(map_type const __restrict__ in_map, //
map_type const __restrict__ out_map, //
index_type const *const __restrict__ out_valid_map_index, //
size_type const num_threads, //
gpu_kernel_region<coordinate_type> kernel, //
index_type *__restrict__ p_count_per_thread) {
extern __shared__ coordinate_type sh_all[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
size_type const coordinate_size = kernel.coordinate_size();
size_type const volume = kernel.volume();
// clang-format off
size_type *sh_size = reinterpret_cast<size_type *>(sh_all);
size_type *sh_tensor_stride = sh_size;
size_type *sh_kernel_size = sh_tensor_stride + coordinate_size;
size_type *sh_dilation = sh_kernel_size + coordinate_size;
coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_dilation + coordinate_size);
coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
// clang-format on
auto const equal = out_map.get_key_equal();
// kernel_maps
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
sh_tensor_stride[i] = kernel.tensor_stride()[i];
sh_kernel_size[i] = kernel.kernel_size()[i];
sh_dilation[i] = kernel.dilation()[i];
}
__syncthreads();
auto sh_kernel = gpu_kernel_region<coordinate_type>(
kernel, sh_tensor_stride, sh_kernel_size, sh_dilation);
coordinate<coordinate_type> point(sh_tmp);
auto const unused_key = out_map.get_unused_key();
if (x < num_threads) {
size_type count = 0;
typename map_type::value_type const &out_value =
out_map.data()[out_valid_map_index[x]];
// valid_index guarantees that it contains a valid value
if (!equal(out_value.first, unused_key)) {
for (auto kernel_ind = 0; kernel_ind < volume; ++kernel_ind) {
sh_kernel.coordinate_at(kernel_ind, out_value.first.data(), sh_tmp);
if (in_map.find(point) != in_map.end()) {
++count;
}
}
}
p_count_per_thread[x] = count;
}
}
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void preallocated_kernel_map_iteration(
map_type const __restrict__ in_map, //
map_type const __restrict__ out_map, //
index_type const *const __restrict__ out_valid_map_index, //
size_type const num_threads, //
gpu_kernel_region<coordinate_type> kernel, //
index_type const *const __restrict__ inclusive_count_cumsum_per_thread, //
index_type *__restrict__ p_kernels, //
index_type *__restrict__ p_in_maps, //
index_type *__restrict__ p_out_maps) {
extern __shared__ coordinate_type sh_all[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
size_type const coordinate_size = kernel.coordinate_size();
size_type const volume = kernel.volume();
// clang-format off
size_type *sh_size = reinterpret_cast<size_type *>(sh_all);
size_type *sh_tensor_stride = sh_size;
size_type *sh_kernel_size = sh_tensor_stride + coordinate_size;
size_type *sh_dilation = sh_kernel_size + coordinate_size;
coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_dilation + coordinate_size);
coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
// clang-format on
auto const equal = out_map.get_key_equal();
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
sh_tensor_stride[i] = kernel.tensor_stride()[i];
sh_kernel_size[i] = kernel.kernel_size()[i];
sh_dilation[i] = kernel.dilation()[i];
}
__syncthreads();
auto sh_kernel = gpu_kernel_region<coordinate_type>(
kernel, sh_tensor_stride, sh_kernel_size, sh_dilation);
coordinate<coordinate_type> curr_coordinate(sh_tmp);
auto const unused_key = out_map.get_unused_key();
if (x < num_threads) {
// iterate over values
auto kernel_map_index =
(x < 1) ? 0 : inclusive_count_cumsum_per_thread[x - 1];
typename map_type::value_type const &out_value =
out_map.data()[out_valid_map_index[x]];
if (!equal(out_value.first, unused_key)) {
// set bounds for the valid keys
for (uint32_t kernel_index = 0; kernel_index < volume; ++kernel_index) {
sh_kernel.coordinate_at(kernel_index, out_value.first.data(), sh_tmp);
auto const &in_result = in_map.find(curr_coordinate);
if (in_result != in_map.end()) {
// insert to
p_kernels[kernel_map_index] = kernel_index;
p_in_maps[kernel_map_index] = (*in_result).second;
p_out_maps[kernel_map_index] = out_value.second;
++kernel_map_index;
}
}
}
}
}
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void
direct_in_out_map(size_type const num_threads, //
map_type const __restrict__ in_map, //
map_type const __restrict__ out_map, //
index_type const *const __restrict__ out_valid_map_offset, //
index_type *__restrict__ p_in_maps, //
index_type *__restrict__ p_out_maps,
index_type const unused_key) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
typename map_type::value_type const &out_value =
out_map.data()[out_valid_map_offset[x]];
auto const &result = in_map.find(out_value.first);
if (result != in_map.end()) {
p_in_maps[x] = (*result).second;
p_out_maps[x] = out_value.second;
} else {
p_in_maps[x] = unused_key;
}
}
}
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void
direct_kernel_map(map_type const __restrict__ in_map, //
map_type const __restrict__ out_map, //
index_type const *const __restrict__ out_valid_map_index, //
size_type const num_threads, //
gpu_kernel_region<coordinate_type> kernel, //
index_type *__restrict__ p_kernels, //
index_type *__restrict__ p_in_maps, //
index_type *__restrict__ p_out_maps,
index_type const unused_map_value) {
extern __shared__ coordinate_type sh_all[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
size_type const coordinate_size = kernel.coordinate_size();
size_type const volume = kernel.volume();
// clang-format off
size_type *sh_size = reinterpret_cast<size_type *>(sh_all);
size_type *sh_tensor_stride = sh_size;
size_type *sh_kernel_size = sh_tensor_stride + coordinate_size;
size_type *sh_dilation = sh_kernel_size + coordinate_size;
coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_dilation + coordinate_size);
coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
// clang-format on
auto const equal = out_map.get_key_equal();
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
sh_tensor_stride[i] = kernel.tensor_stride()[i];
sh_kernel_size[i] = kernel.kernel_size()[i];
sh_dilation[i] = kernel.dilation()[i];
}
__syncthreads();
auto sh_kernel = gpu_kernel_region<coordinate_type>(
kernel, sh_tensor_stride, sh_kernel_size, sh_dilation);
auto const unused_key = out_map.get_unused_key();
if (x < num_threads) {
// iterate over values
index_type kernel_index = x % volume;
typename map_type::value_type const &out_value =
out_map.data()[out_valid_map_index[x / volume]];
if (!equal(out_value.first, unused_key)) {
// set bounds for the valid keys
// TODO: copy the curr_coordinate to sh_curr_coordinate
sh_kernel.coordinate_at(kernel_index, out_value.first.data(), sh_tmp);
auto const &in_result = in_map.find(coordinate<coordinate_type>(sh_tmp));
if (in_result != in_map.end()) {
// insert to
p_kernels[x] = kernel_index;
p_in_maps[x] = (*in_result).second;
p_out_maps[x] = out_value.second;
} else {
p_kernels[x] = unused_map_value;
}
}
}
}
} // namespace detail
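// kernel_map(): dispatches on the kernel volume and CUDAKernelMapMode. A 1x1x1
// kernel reduces to a direct in/out lookup; MEMORY_EFFICIENT uses the two-pass
// count-then-fill scheme above; SPEED_OPTIMIZED expands one thread per
// (output point, kernel offset) pair and compacts the misses afterwards.
// Custom region types are not handled here and hit the final ASSERT.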
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::kernel_map_type
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::kernel_map(
self_type const &out_map, gpu_kernel_region<coordinate_type> const &kernel,
CUDAKernelMapMode::Mode kernel_map_mode, uint32_t thread_dim) const {
// Over estimate the reserve size to be size();
size_type const out_size = out_map.size();
size_type const kernel_volume = kernel.volume();
ASSERT(kernel_volume > 0, "Invalid kernel");
if (kernel_volume == 1) {
// directly iterate over all output first by finding all in out map.
auto const N = out_size;
LOG_DEBUG("out_map size:", N);
index_type *in_out_map = (index_type *)base_type::m_byte_allocator.allocate(
2 * (N + 1) * sizeof(index_type));
index_type *ins = in_out_map;
index_type *outs =
in_out_map + N + 1; // for __restrict__ collision prevention
index_type unused_key = std::numeric_limits<index_type>::max();
hipLaunchKernelGGL(( detail::direct_in_out_map<coordinate_type, size_type, index_type, map_type>)
, dim3(GET_BLOCKS(N, thread_dim)), dim3(thread_dim), 0, 0,
N, *m_map, //
*(out_map.m_map), //
out_map.m_valid_map_index.cdata(), //
ins, // in map
outs, // out map
unused_key);
LOG_DEBUG("Direct in out map copy done");
auto begin = thrust::make_zip_iterator(thrust::make_tuple(ins, outs));
auto const valid_size =
thrust::remove_if(
thrust::device, begin,
thrust::make_zip_iterator(thrust::make_tuple(ins + N, outs + N)),
detail::is_first<index_type>(unused_key)) -
begin;
LOG_DEBUG("Valid size:", valid_size);
kernel_map_type kernel_map(valid_size, base_type::m_byte_allocator, false);
CUDA_CHECK(hipMemcpy(kernel_map.in_maps.data(), ins,
valid_size * sizeof(index_type),
hipMemcpyDeviceToDevice));
CUDA_CHECK(hipMemcpy(kernel_map.out_maps.data(), outs,
valid_size * sizeof(index_type),
hipMemcpyDeviceToDevice));
base_type::m_byte_allocator.deallocate((char *)in_out_map,
2 * (N + 1) * sizeof(index_type));
LOG_DEBUG("Cleaning up");
return kernel_map;
} else if (kernel_map_mode == CUDAKernelMapMode::MEMORY_EFFICIENT &&
kernel.region_type() != RegionType::CUSTOM) {
// (THREAD * D + 3 * D) * 4
uint32_t const shared_memory_size_in_bytes =
3 * m_coordinate_size * sizeof(index_type) + // stride, kernel, dilation
thread_dim * m_coordinate_size * sizeof(coordinate_type); // tmp
// clang-format on
size_type const num_threads = out_size;
auto const num_blocks = GET_BLOCKS(num_threads, thread_dim);
LOG_DEBUG("num block", num_blocks);
LOG_DEBUG("out_map size", out_map.size());
LOG_DEBUG("shared_memory size", shared_memory_size_in_bytes);
LOG_DEBUG("threads dim", thread_dim);
LOG_DEBUG("num threads", num_threads);
index_type *d_p_count_per_thread = reinterpret_cast<index_type *>(
base_type::m_byte_allocator.allocate(num_threads * sizeof(index_type)));
// Initialize count per thread
hipLaunchKernelGGL(( detail::count_kernel<coordinate_type, size_type, index_type, map_type>)
, dim3(num_blocks), dim3(thread_dim), shared_memory_size_in_bytes, 0,
*m_map, //
*out_map.m_map, //
out_map.m_valid_map_index.cbegin(), //
num_threads, //
kernel, //
d_p_count_per_thread);
CUDA_CHECK(hipStreamSynchronize(0));
LOG_DEBUG("count_kernel finished");
thrust::inclusive_scan(thrust::device, d_p_count_per_thread,
d_p_count_per_thread + num_threads,
d_p_count_per_thread);
index_type num_kernel_map; // type following the kernel map allocator
CUDA_CHECK(hipMemcpy(&num_kernel_map,
d_p_count_per_thread + num_threads - 1,
sizeof(index_type), hipMemcpyDeviceToHost));
// set kernel map
LOG_DEBUG("Found", num_kernel_map, "kernel map elements.");
kernel_map_type kernel_map(num_kernel_map, base_type::m_byte_allocator);
CUDA_CHECK(hipStreamSynchronize(0));
LOG_DEBUG("Allocated kernel_map.");
hipLaunchKernelGGL(( detail::preallocated_kernel_map_iteration<coordinate_type, size_type,
index_type, map_type>)
, dim3(num_blocks), dim3(thread_dim), shared_memory_size_in_bytes, 0,
*m_map, //
*out_map.m_map, //
out_map.m_valid_map_index.cbegin(), //
num_threads, //
kernel, //
d_p_count_per_thread, //
kernel_map.kernels.begin(), //
kernel_map.in_maps.begin(), //
kernel_map.out_maps.begin());
CUDA_CHECK(hipStreamSynchronize(0));
LOG_DEBUG("Preallocated kernel map done");
THRUST_CHECK(kernel_map.decompose());
base_type::m_byte_allocator.deallocate(
reinterpret_cast<char *>(d_p_count_per_thread),
num_threads * sizeof(index_type));
LOG_DEBUG("hipFree");
return kernel_map;
} else if (kernel_map_mode == CUDAKernelMapMode::SPEED_OPTIMIZED &&
kernel.region_type() != RegionType::CUSTOM) {
// (THREAD * 3 * D + 3 * D) * 4
uint32_t const shared_memory_size_in_bytes =
3 * m_coordinate_size * sizeof(index_type) + // stride, kernel, dilation
(thread_dim + (thread_dim + kernel_volume - 1) / kernel_volume) *
m_coordinate_size *
sizeof(coordinate_type); // tmp coordinate + current coordinate
size_type const num_threads = out_size * kernel_volume;
auto const num_blocks = GET_BLOCKS(num_threads, thread_dim);
LOG_DEBUG("num block", num_blocks);
LOG_DEBUG("out_map size", out_map.size());
LOG_DEBUG("kernel_volume", kernel_volume);
LOG_DEBUG("shared_memory size", shared_memory_size_in_bytes);
LOG_DEBUG("threads dim", thread_dim);
LOG_DEBUG("num threads", num_threads);
index_type unused_map_value = std::numeric_limits<index_type>::max();
index_type *d_p_valid_in_index =
reinterpret_cast<index_type *>(base_type::m_byte_allocator.allocate(
3 * (num_threads + 1) * sizeof(index_type)));
index_type *d_p_valid_out_index = d_p_valid_in_index + num_threads + 1;
index_type *d_p_valid_kernel_index = d_p_valid_out_index + num_threads + 1;
// Initialize count per thread
hipLaunchKernelGGL(( detail::direct_kernel_map<coordinate_type, size_type, index_type, map_type>)
, dim3(num_blocks), dim3(thread_dim), shared_memory_size_in_bytes, 0,
*m_map, //
*out_map.m_map, //
out_map.m_valid_map_index.cbegin(), //
num_threads, //
kernel, //
d_p_valid_kernel_index, //
d_p_valid_in_index, //
d_p_valid_out_index, //
unused_map_value);
CUDA_CHECK(hipStreamSynchronize(0));
LOG_DEBUG("direct_kernel_map finished");
auto begin = thrust::make_zip_iterator(thrust::make_tuple(
d_p_valid_kernel_index, d_p_valid_in_index, d_p_valid_out_index));
auto const valid_size =
thrust::remove_if(thrust::device, begin,
thrust::make_zip_iterator(thrust::make_tuple(
d_p_valid_kernel_index + num_threads,
d_p_valid_in_index + num_threads,
d_p_valid_out_index + num_threads)),
detail::is_first<index_type>(unused_map_value)) -
begin;
LOG_DEBUG("Valid size:", valid_size);
kernel_map_type kernel_map(valid_size, base_type::m_byte_allocator);
CUDA_CHECK(hipMemcpy(kernel_map.kernels.data(), d_p_valid_kernel_index,
valid_size * sizeof(index_type),
hipMemcpyDeviceToDevice));
CUDA_CHECK(hipMemcpy(kernel_map.in_maps.data(), d_p_valid_in_index,
valid_size * sizeof(index_type),
hipMemcpyDeviceToDevice));
CUDA_CHECK(hipMemcpy(kernel_map.out_maps.data(), d_p_valid_out_index,
valid_size * sizeof(index_type),
hipMemcpyDeviceToDevice));
THRUST_CHECK(kernel_map.decompose());
base_type::m_byte_allocator.deallocate(
reinterpret_cast<char *>(d_p_valid_in_index),
3 * (num_threads + 1) * sizeof(index_type));
LOG_DEBUG("hipFree");
return kernel_map;
  } else { // custom region type or unsupported kernel map mode
ASSERT(false, "Not implemented");
}
}
namespace detail {
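// stride_map_kernel: one thread per valid input row. Each spatial component
// of the input coordinate is floor-divided by the output tensor stride and
// re-multiplied (the batch index is kept as is), and the resulting coordinate
// is looked up in the strided output map; misses are tagged with unused_key
// for later compaction.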
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void
stride_map_kernel(map_type const __restrict__ in_map, //
map_type const __restrict__ out_map, //
index_type const *const __restrict__ in_valid_map_index, //
size_type const num_threads, //
index_type const *const __restrict__ stride, //
index_type *__restrict__ p_in_maps, //
index_type *__restrict__ p_out_maps,
size_type const coordinate_size,
index_type const unused_key) {
extern __shared__ coordinate_type sh_all[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
// clang-format off
size_type *sh_size = reinterpret_cast<size_type *>(sh_all);
size_type *sh_stride = sh_size;
coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_size + coordinate_size);
coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
// clang-format on
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
sh_stride[i] = stride[i];
}
__syncthreads();
if (x >= num_threads)
return;
typename map_type::value_type const &in_value =
in_map.data()[in_valid_map_index[x]];
sh_tmp[0] = in_value.first[0];
for (index_type j = 1; j < coordinate_size; ++j) {
sh_tmp[j] =
(__float2int_rd(__fdiv_rd(in_value.first[j], sh_stride[j - 1]))) *
sh_stride[j - 1];
}
auto out_iter = out_map.find(coordinate<coordinate_type>(sh_tmp));
if (out_iter == out_map.end()) {
p_in_maps[x] = unused_key;
} else {
p_in_maps[x] = in_value.second;
p_out_maps[x] = out_iter->second;
}
}
} // namespace detail
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::kernel_map_type
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::stride_map(
self_type const &out_map, stride_type const &out_tensor_stride,
uint32_t thread_dim) const {
LOG_DEBUG("generating stride_map from stride", base_type::m_tensor_stride,
"to", out_map.get_tensor_stride());
// Over estimate the reserve size to be size();
size_type const in_size = size();
index_storage_type d_out_tensor_stride(out_tensor_stride);
index_type unused_key = std::numeric_limits<index_type>::max();
// (THREAD * D + D) * 4
uint32_t const shared_memory_size_in_bytes =
m_coordinate_size * sizeof(index_type) + // stride
thread_dim * m_coordinate_size * sizeof(coordinate_type); // tmp
size_type const num_threads = in_size;
auto const num_blocks = GET_BLOCKS(num_threads, thread_dim);
LOG_DEBUG("num block", num_blocks);
LOG_DEBUG("shared_memory size", shared_memory_size_in_bytes);
LOG_DEBUG("threads dim", thread_dim);
LOG_DEBUG("num threads", num_threads);
index_type *in_out_map = (index_type *)base_type::m_byte_allocator.allocate(
2 * (in_size + 1) * sizeof(index_type));
index_type *ins = in_out_map;
index_type *outs =
in_out_map + in_size + 1; // for __restrict__ collision prevention
LOG_DEBUG("Allocated temporary memory");
LOG_DEBUG("out_map size", out_map.size(),
"out tensor stride:", out_map.get_tensor_stride(),
"coordinate_size", m_coordinate_size);
hipLaunchKernelGGL(( detail::stride_map_kernel<coordinate_type, size_type, index_type, map_type>)
, dim3(num_blocks), dim3(thread_dim), shared_memory_size_in_bytes, 0,
*m_map, //
*out_map.m_map, //
m_valid_map_index.cbegin(), //
num_threads, //
d_out_tensor_stride.cbegin(), //
ins, //
outs, //
m_coordinate_size, //
unused_key);
auto begin = thrust::make_zip_iterator(thrust::make_tuple(ins, outs));
auto const valid_size =
thrust::remove_if(thrust::device, begin,
thrust::make_zip_iterator(
thrust::make_tuple(ins + in_size, outs + in_size)),
detail::is_first<index_type>(unused_key)) -
begin;
LOG_DEBUG("Valid size:", valid_size);
kernel_map_type kernel_map(valid_size, base_type::m_byte_allocator, false);
CUDA_CHECK(hipMemcpy(kernel_map.in_maps.data(), ins,
valid_size * sizeof(index_type),
hipMemcpyDeviceToDevice));
CUDA_CHECK(hipMemcpy(kernel_map.out_maps.data(), outs,
valid_size * sizeof(index_type),
hipMemcpyDeviceToDevice));
base_type::m_byte_allocator.deallocate(
(char *)in_out_map, 2 * (in_size + 1) * sizeof(index_type));
return kernel_map;
}
namespace detail {
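// origin_map_kernel: zeroes a per-thread scratch key, keeps only the batch
// index of the input row, and looks that key up in the origin map to produce
// the (in, origin row) pairs used for global pooling; the origin row is also
// written as the kernel index so kernel_map.decompose() can group by batch.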
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void
origin_map_kernel(map_type const __restrict__ in_map, //
map_type const __restrict__ origin_map, //
index_type const *const __restrict__ in_valid_map_index, //
size_type const num_threads, //
index_type *__restrict__ p_in_maps, //
index_type *__restrict__ p_out_maps,
index_type *__restrict__ p_kernels,
size_type const coordinate_size) {
extern __shared__ coordinate_type sh_all[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
// clang-format off
coordinate_type *sh_tmp = sh_all + tx * coordinate_size;
// clang-format on
if (x < num_threads)
for (index_type i = 0; i < coordinate_size; ++i)
sh_tmp[i] = 0;
__syncthreads();
if (x < num_threads) {
typename map_type::value_type const &in_value =
in_map.data()[in_valid_map_index[x]];
sh_tmp[0] = in_value.first[0];
auto origin_iter = origin_map.find(coordinate<coordinate_type>(sh_tmp));
p_in_maps[x] = in_value.second;
p_out_maps[x] = origin_iter->second; // origin_map row index
// For kernel_map decompose()
p_kernels[x] = origin_iter->second;
}
}
} // namespace detail
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::kernel_map_type
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::origin_map(
self_type const &origin_map, uint32_t thread_dim) const {
ASSERT(std::all_of(origin_map.get_tensor_stride().begin(),
origin_map.get_tensor_stride().end(),
[](auto const &i) { return i == 0; }),
"Invalid origin tensor stride", origin_map.get_tensor_stride());
// reserve size();
size_type const in_size = size();
LOG_DEBUG("in_map size:", in_size, "origin_map size:", origin_map.size());
// (THREAD * D) * 4
uint32_t const shared_memory_size_in_bytes =
thread_dim * m_coordinate_size * sizeof(coordinate_type); // tmp
size_type const num_threads = in_size;
auto const num_blocks = GET_BLOCKS(num_threads, thread_dim);
LOG_DEBUG("origin_map num block", num_blocks);
LOG_DEBUG("origin_map shared_memory size", shared_memory_size_in_bytes);
LOG_DEBUG("origin_map threads dim", thread_dim);
LOG_DEBUG("origin_map num threads", num_threads);
kernel_map_type kernel_map(in_size, base_type::m_byte_allocator);
CUDA_CHECK(hipStreamSynchronize(0));
LOG_DEBUG("Allocated kernel_map.");
hipLaunchKernelGGL(( detail::origin_map_kernel<coordinate_type, size_type, index_type, map_type>)
, dim3(num_blocks), dim3(thread_dim), shared_memory_size_in_bytes, 0,
*m_map, //
*origin_map.m_map, //
m_valid_map_index.cbegin(), //
num_threads, //
kernel_map.in_maps.begin(), //
kernel_map.out_maps.begin(), //
kernel_map.kernels.begin(), //
m_coordinate_size);
CUDA_CHECK(hipStreamSynchronize(0));
THRUST_CHECK(kernel_map.decompose());
LOG_DEBUG("origin map decomposed");
return kernel_map;
}
namespace detail {
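// interpolation_kernel: each thread handles one (field point, corner) pair of
// the 2^(D-1) lattice corners surrounding a continuous coordinate. The corner
// is selected by the bit pattern of the neighbor index (floor of the strided
// coordinate, plus one stride when the bit is set), looked up in the sparse
// map, and the multilinear weight prod_j (1 - |t_j - c_j| / stride_j) is
// stored for each hit.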
template <typename coordinate_type,
typename index_type, //
typename stride_type, //
typename float_type, //
typename map_type>
__global__ void
interpolation_kernel(map_type __restrict__ in_map, //
index_type const num_threads, //
float_type const *__restrict__ p_tfield, //
index_type *__restrict__ p_in_maps, //
index_type *__restrict__ p_out_maps, //
float_type *__restrict__ p_weights, //
stride_type const *__restrict__ p_tensor_stride, //
index_type const unused_map_value,
index_type const coordinate_size,
index_type const neighbor_volume) {
  // THREADS * coordinate_size * sizeof(float_type)
  // + THREADS * coordinate_size * sizeof(coordinate_type)
  // + coordinate_size * sizeof(index_type)
SharedMemory<float_type> shared;
float_type *sh_all = shared.getPointer();
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
float_type *sh_tfield = sh_all + tx * coordinate_size;
coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(
sh_all + CUDA_NUM_THREADS * coordinate_size);
coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
index_type *sh_tensor_stride = reinterpret_cast<index_type *>(
sh_coordinate + CUDA_NUM_THREADS * coordinate_size);
auto const equal = in_map.get_key_equal();
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
sh_tensor_stride[i] = p_tensor_stride[i];
}
if (x < num_threads) {
index_type const offset = coordinate_size * (x / neighbor_volume);
for (index_type i = 0; i < coordinate_size; ++i) {
sh_tfield[i] = p_tfield[offset + i];
}
}
__syncthreads();
if (x < num_threads) {
// iterate over values
uint32_t neighbor_ind = x % neighbor_volume;
// batch index
sh_tmp[0] = lrint(sh_tfield[0]);
uint32_t mask = 1;
for (uint32_t j = coordinate_size - 1; j > 0; --j) {
index_type curr_tensor_stride = sh_tensor_stride[j - 1];
if ((neighbor_ind & mask) == 0)
sh_tmp[j] =
floor(sh_tfield[j] / curr_tensor_stride) * curr_tensor_stride;
else
sh_tmp[j] =
floor(sh_tfield[j] / curr_tensor_stride) * curr_tensor_stride +
curr_tensor_stride;
mask = mask << 1;
}
auto const &in_result = in_map.find(coordinate<coordinate_type>(sh_tmp));
if (in_result != in_map.end()) {
p_in_maps[x] = (*in_result).second;
p_out_maps[x] = x / neighbor_volume;
// Compute weight
float_type weight = 1;
for (uint32_t j = 1; j < coordinate_size; ++j) {
weight *= 1 - abs(sh_tfield[j] - sh_tmp[j]) / sh_tensor_stride[j - 1];
}
p_weights[x] = weight;
} else {
p_in_maps[x] = unused_map_value;
}
}
}
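// field_map_kernel: like interpolation_kernel but without the neighbor
// expansion. Each field point is snapped to the lattice (batch index rounded,
// spatial components floored to a stride multiple) and looked up once; misses
// are tagged with unused_map_value.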
template <typename coordinate_type,
typename index_type, //
typename stride_type, //
typename float_type, //
typename map_type>
__global__ void
field_map_kernel(map_type __restrict__ in_map, //
index_type const num_threads, //
float_type const *__restrict__ p_tfield, //
index_type *__restrict__ p_in_maps, //
index_type *__restrict__ p_out_maps, //
stride_type const *__restrict__ p_tensor_stride, //
index_type const unused_map_value,
index_type const coordinate_size) {
  // THREADS * coordinate_size * sizeof(coordinate_type)
  // + coordinate_size * sizeof(index_type)
SharedMemory<float_type> shared;
float_type *sh_all = shared.getPointer();
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_all);
coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
index_type *sh_tensor_stride = reinterpret_cast<index_type *>(
sh_coordinate + CUDA_NUM_THREADS * coordinate_size);
auto const equal = in_map.get_key_equal();
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
sh_tensor_stride[i] = p_tensor_stride[i];
}
__syncthreads();
index_type const offset = coordinate_size * x;
if (x < num_threads) {
// iterate over values
float_type const *curr_tfield = p_tfield + offset;
// batch index
sh_tmp[0] = lrint(curr_tfield[0]);
for (uint32_t j = coordinate_size - 1; j > 0; --j) {
index_type curr_tensor_stride = sh_tensor_stride[j - 1];
sh_tmp[j] =
floor(curr_tfield[j] / curr_tensor_stride) * curr_tensor_stride;
}
auto const &in_result = in_map.find(coordinate<coordinate_type>(sh_tmp));
if (in_result != in_map.end()) {
p_in_maps[x] = (*in_result).second;
p_out_maps[x] = x;
} else {
p_in_maps[x] = unused_map_value;
}
}
}
// interpolation map inst
template <typename coordinate_type, typename index_type, typename size_type,
typename stride_type, typename field_type, typename map_type,
typename ByteAllocatorType>
std::vector<at::Tensor> interpolation_map_weight_tfield_type(
uint32_t const num_tfield, //
uint32_t const coordinate_size, //
index_type const unused_key, //
field_type const *const p_tfield, //
map_type &map, //
stride_type const *const p_tensor_stride, //
ByteAllocatorType const &byte_allocator,
c10::TensorOptions tfield_options) {
uint32_t const neighbor_volume = ::pow(2, (coordinate_size - 1));
size_type num_threads = neighbor_volume * num_tfield;
LOG_DEBUG("neighbor_volume:", neighbor_volume, "num_tfield:", num_tfield,
"num_threads:", num_threads);
index_type *d_in_map = reinterpret_cast<index_type *>(
byte_allocator.allocate(num_threads * sizeof(index_type)));
index_type *d_out_map = reinterpret_cast<index_type *>(
byte_allocator.allocate(num_threads * sizeof(index_type)));
field_type *d_weight = reinterpret_cast<field_type *>(
byte_allocator.allocate(num_threads * sizeof(field_type)));
size_type shared_memory_size_in_bytes =
coordinate_size * CUDA_NUM_THREADS * sizeof(field_type) +
coordinate_size * CUDA_NUM_THREADS * sizeof(coordinate_type) +
coordinate_size * sizeof(index_type);
LOG_DEBUG("Shared memory size:", shared_memory_size_in_bytes);
hipLaunchKernelGGL(( interpolation_kernel<coordinate_type, index_type, stride_type, field_type,
map_type>)
, dim3(GET_BLOCKS(num_threads, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS),
shared_memory_size_in_bytes, 0, map, //
num_threads, //
p_tfield, //
d_in_map, //
d_out_map, //
d_weight, //
p_tensor_stride, //
unused_key, //
coordinate_size, //
neighbor_volume);
// remove unused_keys
auto valid_begin =
thrust::make_zip_iterator(thrust::make_tuple(d_in_map, //
d_out_map, d_weight));
size_type const number_of_valid =
thrust::remove_if(thrust::device, //
valid_begin, //
thrust::make_zip_iterator(thrust::make_tuple(
d_in_map + num_threads, //
d_out_map + num_threads, d_weight + num_threads)),
detail::is_first<index_type>(unused_key)) -
valid_begin;
LOG_DEBUG("number_of_valid:", number_of_valid);
auto final_in_map =
torch::empty({number_of_valid},
tfield_options.dtype(torch::kInt32).requires_grad(false));
auto final_out_map =
torch::empty({number_of_valid},
tfield_options.dtype(torch::kInt32).requires_grad(false));
auto final_weights =
torch::empty({number_of_valid}, tfield_options.requires_grad(false));
if (number_of_valid > 0) {
    // d_in_map, d_out_map, and d_weight are device buffers from the byte
    // allocator and the destination tensors are CUDA tensors, so the copies
    // are device-to-device.
    CUDA_CHECK(hipMemcpy(final_in_map.template data_ptr<int32_t>(), d_in_map,
                         number_of_valid * sizeof(int32_t),
                         hipMemcpyDeviceToDevice));
    CUDA_CHECK(hipMemcpy(final_out_map.template data_ptr<int32_t>(), d_out_map,
                         number_of_valid * sizeof(int32_t),
                         hipMemcpyDeviceToDevice));
    CUDA_CHECK(hipMemcpy(final_weights.template data_ptr<field_type>(),
                         d_weight, number_of_valid * sizeof(field_type),
                         hipMemcpyDeviceToDevice));
}
byte_allocator.deallocate((char *)d_in_map, num_threads * sizeof(index_type));
byte_allocator.deallocate((char *)d_out_map,
num_threads * sizeof(index_type));
byte_allocator.deallocate((char *)d_weight, num_threads * sizeof(field_type));
return {final_in_map, final_out_map, final_weights};
}
// interpolation map inst
template <typename coordinate_type, typename index_type, typename size_type,
typename stride_type, typename field_type, typename map_type,
typename ByteAllocatorType>
std::pair<at::Tensor, at::Tensor>
field_map_type(uint32_t const num_tfield, //
uint32_t const coordinate_size, //
index_type const unused_key, //
field_type const *const p_tfield, //
map_type &map, //
stride_type const *const p_tensor_stride, //
ByteAllocatorType const &byte_allocator) {
size_type num_threads = num_tfield;
LOG_DEBUG("num_threads:", num_threads);
index_type *d_in_map = reinterpret_cast<index_type *>(
byte_allocator.allocate(num_threads * sizeof(index_type)));
index_type *d_out_map = reinterpret_cast<index_type *>(
byte_allocator.allocate(num_threads * sizeof(index_type)));
size_type shared_memory_size_in_bytes =
coordinate_size * CUDA_NUM_THREADS * sizeof(coordinate_type) +
coordinate_size * sizeof(index_type);
LOG_DEBUG("Shared memory size:", shared_memory_size_in_bytes);
hipLaunchKernelGGL(( field_map_kernel<coordinate_type, index_type, stride_type, field_type,
map_type>)
, dim3(GET_BLOCKS(num_threads, CUDA_NUM_THREADS)), dim3(CUDA_NUM_THREADS),
shared_memory_size_in_bytes, 0, map, //
num_threads, //
p_tfield, //
d_in_map, //
d_out_map, //
p_tensor_stride, //
unused_key, //
coordinate_size);
// remove unused_keys
auto valid_begin =
thrust::make_zip_iterator(thrust::make_tuple(d_in_map, d_out_map));
size_type const number_of_valid =
thrust::remove_if(thrust::device, //
valid_begin, //
thrust::make_zip_iterator(
thrust::make_tuple(d_in_map + num_threads, //
d_out_map + num_threads)),
detail::is_first<index_type>(unused_key)) -
valid_begin;
LOG_DEBUG("number_of_valid:", number_of_valid);
auto curr_device = at::hip::current_device();
auto tfield_options = torch::TensorOptions({at::kCUDA, curr_device})
.dtype(torch::kInt32)
.requires_grad(false);
auto final_in_map = torch::empty({number_of_valid}, tfield_options);
auto final_out_map = torch::empty({number_of_valid}, tfield_options);
if (number_of_valid > 0) {
    // d_in_map and d_out_map are device buffers and the destination tensors
    // are CUDA tensors, so the copies are device-to-device.
    CUDA_CHECK(hipMemcpy(final_in_map.template data_ptr<int32_t>(), d_in_map,
                         number_of_valid * sizeof(int32_t),
                         hipMemcpyDeviceToDevice));
    CUDA_CHECK(hipMemcpy(final_out_map.template data_ptr<int32_t>(), d_out_map,
                         number_of_valid * sizeof(int32_t),
                         hipMemcpyDeviceToDevice));
}
byte_allocator.deallocate((char *)d_in_map, num_threads * sizeof(index_type));
byte_allocator.deallocate((char *)d_out_map,
num_threads * sizeof(index_type));
return {final_in_map, final_out_map};
}
} // namespace detail
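// interpolation_map_weight(): given an N x D floating-point coordinate tensor
// on the same device, returns {in_map, out_map, weights} for multilinear
// interpolation against this map. Illustrative sketch only (`gpu_map` and
// `tfield` are hypothetical names):
//
//   at::Tensor tfield = /* [N, D] float or double CUDA tensor */;
//   auto maps = gpu_map.interpolation_map_weight(tfield);
//   at::Tensor in = maps[0], out = maps[1], w = maps[2];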
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
std::vector<at::Tensor>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::interpolation_map_weight(
at::Tensor const &tfield) const {
// Over estimate the reserve size to be size();
ASSERT(tfield.dim() == 2, "Invalid tfield dimension");
ASSERT(tfield.size(1) == m_coordinate_size, "Invalid tfield size");
size_type const num_tfield = tfield.size(0);
uint32_t const neighbor_volume = ::pow(2, (m_coordinate_size - 1));
index_type const unused_key = std::numeric_limits<index_type>::max();
LOG_DEBUG("map size", m_size);
switch (tfield.scalar_type()) {
case at::ScalarType::Double:
return detail::interpolation_map_weight_tfield_type<
coordinate_type, index_type, size_type, index_type, double, map_type,
TemplatedAllocator<char>>(num_tfield, //
m_coordinate_size, //
unused_key, //
tfield.template data_ptr<double>(), //
*m_map, //
m_device_tensor_stride.cbegin(), //
m_byte_allocator, //
tfield.options());
case at::ScalarType::Float:
return detail::interpolation_map_weight_tfield_type<
coordinate_type, index_type, size_type, index_type, float, map_type,
TemplatedAllocator<char>>(num_tfield, //
m_coordinate_size, //
unused_key, //
tfield.template data_ptr<float>(), //
*m_map, //
m_device_tensor_stride.cbegin(), //
m_byte_allocator, //
tfield.options());
default:
ASSERT(false, "Unsupported float type");
}
}
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
template <typename coordinate_field_type>
std::pair<at::Tensor, at::Tensor>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::field_map(
coordinate_field_type const *p_tfield, size_type const num_tfield) const {
index_type const unused_key = std::numeric_limits<index_type>::max();
LOG_DEBUG("map size", m_size);
return detail::field_map_type<coordinate_type, index_type, size_type,
index_type, coordinate_field_type, map_type,
TemplatedAllocator<char>>(
num_tfield, //
m_coordinate_size, //
unused_key, //
p_tfield, //
*m_map, //
m_device_tensor_stride.cbegin(), //
m_byte_allocator);
}
/**
* Union map
*/
namespace detail {
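// union_map_kernel: one thread per valid row of an input map. The row's
// coordinate is looked up in the union map and the (in row, union row) index
// pair is written into the two halves of the per-map output tensor.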
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename tensor_type, //
typename map_type>
__global__ void
union_map_kernel(size_type const num_threads, //
map_type const __restrict__ in_map, //
map_type const __restrict__ union_map, //
index_type const *const __restrict__ in_valid_map_index, //
tensor_type *__restrict__ p_in_maps, //
tensor_type *__restrict__ p_union_maps,
size_type const coordinate_size) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
typename map_type::value_type const &in_value =
in_map.data()[in_valid_map_index[x]];
auto union_iter = union_map.find(in_value.first);
p_in_maps[x] = in_value.second;
p_union_maps[x] = union_iter->second;
}
}
} // namespace detail
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
std::vector<at::Tensor>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::union_map(
std::vector<std::reference_wrapper<self_type>> const &in_maps,
uint32_t thread_dim) const {
auto options = torch::TensorOptions({at::kCUDA, at::hip::current_device()})
.dtype(torch::kInt64)
.requires_grad(false);
std::vector<at::Tensor> union_maps;
for (self_type const &in_map : in_maps) {
size_type const num_threads = in_map.m_valid_map_index.size();
auto const num_blocks = GET_BLOCKS(num_threads, thread_dim);
at::Tensor curr_map = torch::empty({2, num_threads}, options);
LOG_DEBUG("in_map size", num_threads, ", num block", num_blocks,
", threads dim", thread_dim);
int64_t *d_in_map = curr_map.template data_ptr<int64_t>();
hipLaunchKernelGGL(( detail::union_map_kernel<coordinate_type, size_type, index_type, int64_t,
map_type>)
, dim3(num_blocks), dim3(thread_dim), 0, 0, num_threads, //
*in_map.m_map, //
*m_map, //
in_map.m_valid_map_index.cbegin(), //
d_in_map, //
d_in_map + num_threads, //
m_coordinate_size);
CUDA_CHECK(hipStreamSynchronize(0));
union_maps.push_back(std::move(curr_map));
}
return union_maps;
}
// Helper functions
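// copy_coordinates(): writes the coordinates of all valid rows, in row order,
// into a caller-provided device buffer of size() x m_coordinate_size entries.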
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
void CoordinateMapGPU<coordinate_type, TemplatedAllocator>::copy_coordinates(
coordinate_type *dst_coordinate) const {
size_type const num_threads = size();
if (num_threads <= 0)
return;
// Copy by offset
// size_type const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS);
// detail::copy_coordinates_by_offset<coordinate_type, size_type, index_type,
// map_type>
// <<<num_blocks, CUDA_NUM_THREADS>>>(
// *m_map, //
// dst_coordinate, //
// m_valid_map_index.data(), //
// num_threads, //
// m_coordinate_size);
size_type const num_blocks =
GET_BLOCKS(num_threads * m_coordinate_size, CUDA_NUM_THREADS);
hipLaunchKernelGGL(( detail::copy_coordinates_by_valid_row<coordinate_type, size_type, index_type,
map_type>)
, dim3(num_blocks), dim3(CUDA_NUM_THREADS), 0, 0,
// *m_map, //
const_coordinate_data(), //
dst_coordinate, //
m_valid_row_index.cbegin(), //
num_threads * m_coordinate_size, //
m_coordinate_size);
}
// Template instantiation
template class CoordinateFieldMapGPU<default_types::ccoordinate_type,
default_types::dcoordinate_type,
detail::default_allocator>;
template class CoordinateFieldMapGPU<default_types::ccoordinate_type,
default_types::dcoordinate_type,
detail::c10_allocator>;
template class CoordinateMapGPU<default_types::dcoordinate_type,
detail::default_allocator>;
template class CoordinateMapGPU<default_types::dcoordinate_type,
detail::c10_allocator>;
template std::pair<
gpu_storage<default_types::index_type, detail::default_allocator<char>>,
gpu_storage<default_types::index_type, detail::default_allocator<char>>>
CoordinateMapGPU<default_types::dcoordinate_type, detail::default_allocator>::
insert_and_map<true>(
coordinate_iterator<default_types::dcoordinate_type> key_first,
coordinate_iterator<default_types::dcoordinate_type> key_last);
template std::pair<
gpu_storage<default_types::index_type, detail::default_allocator<char>>,
gpu_storage<default_types::index_type, detail::default_allocator<char>>>
CoordinateMapGPU<default_types::dcoordinate_type, detail::default_allocator>::
insert_and_map<false>(
coordinate_iterator<default_types::dcoordinate_type> key_first,
coordinate_iterator<default_types::dcoordinate_type> key_last);
template std::pair<
gpu_storage<default_types::index_type, detail::c10_allocator<char>>,
gpu_storage<default_types::index_type, detail::c10_allocator<char>>>
CoordinateMapGPU<default_types::dcoordinate_type, detail::c10_allocator>::
insert_and_map<true>(
coordinate_iterator<default_types::dcoordinate_type> key_first,
coordinate_iterator<default_types::dcoordinate_type> key_last);
template std::pair<
gpu_storage<default_types::index_type, detail::c10_allocator<char>>,
gpu_storage<default_types::index_type, detail::c10_allocator<char>>>
CoordinateMapGPU<default_types::dcoordinate_type, detail::c10_allocator>::
insert_and_map<false>(
coordinate_iterator<default_types::dcoordinate_type> key_first,
coordinate_iterator<default_types::dcoordinate_type> key_last);
template std::pair<at::Tensor, at::Tensor>
CoordinateMapGPU<default_types::dcoordinate_type, detail::default_allocator>::
field_map<float>(float const *p_tfield,
default_types::size_type const num_tfield) const;
template std::pair<at::Tensor, at::Tensor>
CoordinateMapGPU<default_types::dcoordinate_type, detail::c10_allocator>::
field_map<float>(float const *p_tfield,
default_types::size_type const num_tfield) const;
} // namespace minkowski
| b833c81bdf6312dc74543661493e94f1de34ef37.cu | /*
* Copyright (c) 2020 NVIDIA CORPORATION.
* Copyright (c) 2018-2020 Chris Choy ([email protected])
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to deal
* in the Software without restriction, including without limitation the rights
* to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
* copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*
* Please cite "4D Spatio-Temporal ConvNets: Minkowski Convolutional Neural
* Networks", CVPR'19 (https://arxiv.org/abs/1904.08755) if you use any part
* of the code.
*/
#include "coordinate_map_functors.cuh"
#include "coordinate_map_gpu.cuh"
#include "gpu.cuh"
#include "kernel_map.cuh"
#include "kernel_map.hpp"
#include "sharedmem.cuh"
#include <thrust/copy.h>
#include <thrust/execution_policy.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/iterator/transform_iterator.h>
#include <thrust/sort.h>
namespace minkowski {
namespace detail {
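// remap_inverse_map: for every input coordinate, looks up its row in the
// already-built hash map and records it as the inverse (point -> unique row)
// mapping.
// insert_and_map_kernel: inserts one coordinate per thread; successful
// insertions record their row and map offset, while duplicates are tagged
// with unused_key so they can be compacted away afterwards.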
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void
remap_inverse_map(map_type __restrict__ map, //
coordinate_type const *__restrict__ coordinates, //
index_type *__restrict__ inverse_map, //
size_type const num_threads, //
size_type const coordinate_size //
) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
auto result = map.find(
coordinate<coordinate_type>{&coordinates[x * coordinate_size]});
inverse_map[x] = result->second;
}
}
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void
insert_and_map_kernel(map_type __restrict__ map, //
coordinate_type const *__restrict__ coordinates, //
index_type *__restrict__ valid_map_index, //
index_type *__restrict__ valid_row_index, //
size_type const num_threads, //
size_type const coordinate_size, //
index_type const unused_key) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
// Returns pair<iterator, (bool)insert_success>
auto const result = map.insert(thrust::make_pair(
coordinate<coordinate_type>{&coordinates[x * coordinate_size]}, x));
// auto test = &coordinates[x * coordinate_size];
if (result.second) {
valid_row_index[x] = x;
// success map index. remove failed insertion with success.
valid_map_index[x] = result.first.offset();
} else {
valid_map_index[x] = unused_key;
}
}
}
} // namespace detail
/*
* Field Map
*/
namespace detail {
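// quantize_coordinates_kernel: converts continuous field coordinates to
// integer sparse-tensor coordinates. The batch component is rounded to the
// nearest integer; spatial components are floored and, when the tensor stride
// is not all ones, snapped down to a stride multiple using strides staged in
// shared memory.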
template <typename coordinate_field_type, typename coordinate_int_type,
typename index_type, bool stride_one>
__global__ void quantize_coordinates_kernel(
coordinate_field_type const *__restrict__ p_tfield, //
coordinate_int_type *__restrict__ p_stensor, //
index_type const *__restrict__ p_tensor_stride, //
index_type const num_threads, index_type const coordinate_size) {
// coordinate_size * sizeof(index_type) + coordinate_size * sizeof(float_type)
// + THREADS * coordinate_size * sizeof(coordinate_type)
extern __shared__ index_type sh_tensor_stride[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (stride_one) {
if (x < num_threads) {
if (x % coordinate_size == 0)
p_stensor[x] = lrint(p_tfield[x]);
else
p_stensor[x] = floor(p_tfield[x]);
}
} else {
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
sh_tensor_stride[i] = p_tensor_stride[i];
}
__syncthreads();
if (x < num_threads) {
// batch index
if (x % coordinate_size == 0)
p_stensor[x] = lrint(p_tfield[x]);
else {
index_type curr_tensor_stride =
sh_tensor_stride[((x - 1) % coordinate_size)];
p_stensor[x] =
floor(p_tfield[x] / curr_tensor_stride) * curr_tensor_stride;
}
}
}
}
} // namespace detail
template <typename coordinate_field_type, typename coordinate_int_type,
template <typename T> class TemplatedAllocator>
void CoordinateFieldMapGPU<coordinate_field_type, coordinate_int_type,
TemplatedAllocator>::
quantize_coordinates(coordinate_int_type *d_dst_coordinates,
stride_type const &tensor_stride) const {
int64_t const stride_prod = std::accumulate(
tensor_stride.begin(), tensor_stride.end(), 1, std::multiplies<>());
// Copy tensor_stride to device
index_type *d_tensor_stride = reinterpret_cast<index_type *>(
m_byte_allocator.allocate(m_coordinate_size * sizeof(index_type)));
CUDA_CHECK(cudaMemcpy(
d_tensor_stride, // dst
tensor_stride.data(), // first element of the dereferenced iter.
sizeof(index_type) * m_coordinate_size, // bytes
cudaMemcpyHostToDevice));
size_type const num_threads = size() * m_coordinate_size;
auto const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS);
if (stride_prod == 1) {
detail::quantize_coordinates_kernel<coordinate_field_type,
coordinate_int_type, index_type, true>
<<<num_blocks, CUDA_NUM_THREADS,
m_coordinate_size * sizeof(index_type)>>>(
const_coordinate_data(), d_dst_coordinates, d_tensor_stride,
num_threads, m_coordinate_size);
} else {
detail::quantize_coordinates_kernel<coordinate_field_type,
coordinate_int_type, index_type, false>
<<<num_blocks, CUDA_NUM_THREADS,
m_coordinate_size * sizeof(index_type)>>>(
const_coordinate_data(), d_dst_coordinates, d_tensor_stride,
num_threads, m_coordinate_size);
}
}
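/*
 * Illustrative note (not part of the original source): quantization keeps the
 * leading batch index (rounded with lrint) and floors every spatial component
 * to the nearest multiple of the tensor stride. Assuming tensor_stride ==
 * {2, 4}, the field coordinate (0.3, 5.7, 9.1) would be quantized to (0, 4, 8):
 *
 *   batch: lrint(0.3)         -> 0
 *   dim 0: floor(5.7 / 2) * 2 -> 4
 *   dim 1: floor(9.1 / 4) * 4 -> 8
 */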
/*
 * @brief Given a key iterator begin-end pair, insert all coordinates. The row
 * index of each coordinate becomes its mapped value.
 *
 * @note The key iterators can be 1) pointers or 2) coordinate or vector
 * iterators.
 *
 * @return none
 */
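/*
 * A hypothetical usage sketch (not from the original source). `map` is assumed
 * to be an already constructed CoordinateMapGPU<int32_t, TemplatedAllocator>,
 * and `first` / `last` are coordinate_iterator<int32_t> instances wrapping a
 * device buffer of N row-major coordinates (iterator construction not shown):
 *
 *   auto const rows_and_inverse = map.insert_and_map<true>(first, last);
 *   // rows_and_inverse.first  : row indices of the unique coordinates
 *   // rows_and_inverse.second : inverse map from input rows to unique rows
 */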
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
template <bool remap>
void CoordinateMapGPU<coordinate_type, TemplatedAllocator>::insert(
coordinate_iterator<coordinate_type> key_first,
coordinate_iterator<coordinate_type> key_last) {
size_type const N = key_last - key_first;
LOG_DEBUG("key iterator length", N);
if (N == 0) {
m_size = 0;
return;
}
m_valid_row_index.allocate(N);
m_valid_map_index.allocate(N);
// Copy the coordinates to m_coordinate
base_type::reserve(N);
CUDA_CHECK(
cudaMemcpy(coordinate_data(), // dst
key_first->data(), // first element of the dereferenced iter.
sizeof(coordinate_type) * N * m_coordinate_size, // bytes
cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaDeviceSynchronize());
LOG_DEBUG("Reserved and copiedm", N, "x", m_coordinate_size, "coordinates");
// compute cuda kernel call params
size_type const num_threads = N;
LOG_DEBUG("nm_threads", num_threads);
size_type const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS);
LOG_DEBUG("nm_blocks", num_blocks);
index_type const unused_key = std::numeric_limits<index_type>::max();
LOG_DEBUG("unused_key", unused_key);
detail::insert_and_map_kernel<coordinate_type, size_type, index_type,
map_type><<<num_blocks, CUDA_NUM_THREADS>>>(
*m_map, //
const_coordinate_data(), //
m_valid_map_index.data(), //
m_valid_row_index.data(), //
num_threads, m_coordinate_size, unused_key);
CUDA_CHECK(cudaStreamSynchronize(0));
LOG_DEBUG("Map size:", m_map->size());
// Valid row index
auto valid_begin = thrust::make_zip_iterator(
thrust::make_tuple(m_valid_map_index.begin(), m_valid_row_index.begin()));
size_type const number_of_valid =
thrust::remove_if(thrust::device, valid_begin,
thrust::make_zip_iterator(thrust::make_tuple(
m_valid_map_index.end(), m_valid_row_index.end())),
detail::is_first<index_type>(unused_key)) -
valid_begin;
m_valid_row_index.resize(number_of_valid);
m_valid_map_index.resize(number_of_valid);
m_size = number_of_valid;
LOG_DEBUG("Number of successful insertion", m_size);
if (remap // When remapping
&& number_of_valid != N // when the # of inserted items differ from the #
// of successful insertions
) {
m_inverse_row_index.allocate(N);
thrust::counting_iterator<uint32_t> count_begin{0};
thrust::for_each(count_begin, count_begin + number_of_valid,
detail::update_value_with_offset<index_type, map_type>{
*m_map, m_valid_map_index.data()});
size_type const num_threads = N;
auto const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS);
detail::remap_inverse_map<coordinate_type, size_type, index_type, map_type>
<<<num_blocks, CUDA_NUM_THREADS>>>(*m_map, //
const_coordinate_data(), //
m_inverse_row_index.data(), //
num_threads, m_coordinate_size);
LOG_DEBUG("Remapping finished");
}
}
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
template <bool remap>
std::pair<gpu_storage<default_types::index_type, TemplatedAllocator<char>>,
gpu_storage<default_types::index_type, TemplatedAllocator<char>>>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::insert_and_map(
coordinate_iterator<coordinate_type> key_first,
coordinate_iterator<coordinate_type> key_last) {
LOG_DEBUG("insert_and_map");
insert<remap>(key_first, key_last);
return std::make_pair(m_valid_row_index, m_inverse_row_index);
}
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
void CoordinateMapGPU<coordinate_type, TemplatedAllocator>::
initialize_valid_indices(size_t const N_unique) {
m_valid_row_index.resize(N_unique);
m_valid_map_index.resize(N_unique);
m_size = N_unique;
// Insert coordinates
auto insert = detail::insert_coordinate<coordinate_type, map_type,
index_type *>{
*m_map, // map
const_coordinate_data(), // coordinates,
m_valid_row_index.data(), // valid row
m_valid_map_index.data(), // iter offset
m_coordinate_size};
thrust::counting_iterator<uint32_t> count_begin{0};
thrust::for_each(thrust::device, count_begin, count_begin + N_unique, insert);
}
/*
 * @brief Given a key iterator begin-end pair, find all valid keys and their
 * indices.
 *
 * @return a pair of (valid query index, map value) vectors.
 */
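/*
 * A hypothetical usage sketch (not from the original source). `query_first` /
 * `query_last` are assumed to be coordinate_iterator instances over device
 * query coordinates:
 *
 *   auto const found = map.find(query_first, query_last);
 *   // found.first  : indices of the queries present in the map
 *   // found.second : the mapped value (row index) for each present query
 */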
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
std::pair<gpu_storage<default_types::index_type, TemplatedAllocator<char>>,
gpu_storage<default_types::index_type, TemplatedAllocator<char>>>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::find(
coordinate_iterator<coordinate_type> key_first,
coordinate_iterator<coordinate_type> key_last) const {
size_type N = key_last - key_first;
LOG_DEBUG(N, "queries for find.");
auto const find_functor = detail::find_coordinate<coordinate_type, map_type>(
*m_map, key_first->data(), m_unused_element, m_coordinate_size);
LOG_DEBUG("Find functor initialized.");
auto const invalid_functor =
detail::is_unused_pair<coordinate_type, mapped_type>(m_unused_element);
LOG_DEBUG("Valid functor initialized.");
thrust::counting_iterator<index_type> index{0};
gpu_storage<index_type, byte_allocator_type> input_index(N);
gpu_storage<index_type, byte_allocator_type> results(N);
LOG_DEBUG("Initialized functors.");
thrust::sequence(thrust::device, input_index.begin(), input_index.end());
thrust::transform(thrust::device, index, index + N, results.begin(),
find_functor);
size_type const number_of_valid =
thrust::remove_if(thrust::device,
thrust::make_zip_iterator(thrust::make_tuple(
input_index.begin(), results.begin())),
thrust::make_zip_iterator(thrust::make_tuple(
input_index.end(), results.end())),
invalid_functor) -
thrust::make_zip_iterator(
thrust::make_tuple(input_index.begin(), results.begin()));
LOG_DEBUG("Number of valid", number_of_valid);
input_index.resize(number_of_valid);
results.resize(number_of_valid);
return std::make_pair(input_index, results);
}
namespace detail {
template <typename coordinate_type, //
typename size_type, //
typename index_type>
__global__ void
stride_copy(coordinate_type const *__restrict__ src_coordinates, //
index_type const *__restrict__ src_valid_row_index, //
index_type const *__restrict__ stride, //
coordinate_type *__restrict__ dst_coordinates, //
size_type const num_threads, size_type const coordinate_size) {
extern __shared__ size_type sh_stride[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x)
sh_stride[i] = stride[i];
__syncthreads();
if (x < num_threads) {
const index_type src_start = src_valid_row_index[x] * coordinate_size;
const index_type dst_start = x * coordinate_size;
dst_coordinates[dst_start] = src_coordinates[src_start];
for (index_type j = 1; j < coordinate_size; ++j) {
dst_coordinates[dst_start + j] =
(__float2int_rd(
__fdiv_rd(src_coordinates[src_start + j], sh_stride[j - 1]))) *
sh_stride[j - 1];
// (__double2int_rd(
// __ddiv_rn(src_coordinates[src_start + j], sh_stride[j - 1]))) *
// sh_stride[j - 1];
}
}
}
} // namespace detail
/*
 * @brief Given a kernel stride, generate a new coordinate map with strided
 * coordinates.
 *
 * @return a coordinate map whose tensor stride is the current tensor stride
 * scaled by the given stride.
 */
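/*
 * Illustrative example (not part of the original source): with an input tensor
 * stride of {1, 1} and a kernel stride of {2, 2}, the coordinate (b, 5, 3)
 * maps to (b, 4, 2), since each spatial component is floored to a multiple of
 * the new tensor stride {2, 2}. Duplicates produced by this mapping are
 * collapsed by the hash-map insertion below.
 */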
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::stride(
stride_type const &stride) const {
  // Overestimate the reserve size as size().
size_type const N = size();
LOG_DEBUG("Strided map with kernel stride:", stride);
self_type stride_map(
N, m_coordinate_size, m_hashtable_occupancy,
detail::stride_tensor_stride(base_type::m_tensor_stride, stride),
m_map_allocator, base_type::m_byte_allocator);
index_storage_type out_device_tensor_stride(stride_map.get_tensor_stride());
// stride coordinates
size_type const num_threads = N;
auto const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS);
detail::stride_copy<coordinate_type, size_type, index_type>
<<<num_blocks, CUDA_NUM_THREADS, m_coordinate_size * sizeof(size_type)>>>(
const_coordinate_data(), //
m_valid_row_index.cbegin(), //
out_device_tensor_stride.cbegin(), //
stride_map.coordinate_data(), //
num_threads, m_coordinate_size);
LOG_DEBUG("Stride copy done.");
auto &stride_valid_row_index = stride_map.m_valid_row_index;
auto &stride_valid_map_index = stride_map.m_valid_map_index;
stride_valid_row_index.resize(N); // row indices
stride_valid_map_index.resize(N); // map offset
// Insert coordinates
index_type const unused_key = std::numeric_limits<index_type>::max();
LOG_DEBUG("unused_key", unused_key);
detail::insert_and_map_kernel<coordinate_type, size_type, index_type,
map_type><<<num_blocks, CUDA_NUM_THREADS>>>(
*stride_map.m_map, //
stride_map.const_coordinate_data(), //
stride_valid_map_index.data(), //
stride_valid_row_index.data(), //
num_threads, m_coordinate_size, unused_key);
CUDA_CHECK(cudaStreamSynchronize(0));
LOG_DEBUG("Stride map insertion complete");
// Valid row index
auto valid_begin = thrust::make_zip_iterator(
thrust::make_tuple(stride_valid_map_index.begin(), //
stride_valid_row_index.begin()));
size_type const number_of_valid =
thrust::remove_if(thrust::device, //
valid_begin, //
thrust::make_zip_iterator(
thrust::make_tuple(stride_valid_map_index.end(), //
stride_valid_row_index.end())),
detail::is_first<index_type>(unused_key)) -
valid_begin;
stride_valid_row_index.resize(number_of_valid);
stride_valid_map_index.resize(number_of_valid);
stride_map.m_size = number_of_valid;
LOG_DEBUG("Reduced to", number_of_valid);
// remap values
thrust::counting_iterator<uint32_t> count_begin{0};
thrust::for_each(count_begin, count_begin + number_of_valid,
detail::update_value_with_offset<index_type, map_type>{
*stride_map.m_map, stride_map.m_valid_map_index.data()});
LOG_DEBUG("Stride remap done");
return stride_map;
}
namespace detail {
template <typename coordinate_type, typename index_type>
__device__ bool is_coordinate_aligned(coordinate_type *point,
index_type *out_tensor_stride,
uint32_t const size) {
for (uint32_t i = 0; i < size - 1; ++i) {
if (point[i + 1] % out_tensor_stride[i] != 0)
return false;
}
return true;
}
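/*
 * Illustrative example (not part of the original source): for a point
 * (b, 5, 4) with out_tensor_stride {2, 2}, the check fails because 5 % 2 != 0,
 * so the non-transposed branch of the kernel below skips that candidate.
 */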
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void kernel_region_insert(
size_type const num_threads, //
map_type __restrict__ out_map, //
coordinate_type const *const __restrict__ p_in_coordinates, //
index_type const *const __restrict__ in_valid_row_index, //
coordinate_type *__restrict__ p_out_coordinates, //
index_type *__restrict__ out_valid_row_index, //
index_type *__restrict__ out_valid_map_index, //
gpu_kernel_region<coordinate_type> kernel, //
size_type const *const __restrict__ out_tensor_stride, //
index_type const unused_key) { //
extern __shared__ coordinate_type sh_all[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
size_type const coordinate_size = kernel.coordinate_size();
size_type const volume = kernel.volume();
// clang-format off
size_type *sh_size = reinterpret_cast<size_type *>(sh_all);
size_type *sh_tensor_stride = sh_size;
size_type *sh_kernel_size = sh_tensor_stride + coordinate_size;
size_type *sh_dilation = sh_kernel_size + coordinate_size;
size_type *sh_out_tensor_stride = sh_dilation + coordinate_size;
coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_out_tensor_stride + coordinate_size);
coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
// clang-format on
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
sh_tensor_stride[i] = kernel.tensor_stride()[i];
sh_kernel_size[i] = kernel.kernel_size()[i];
sh_dilation[i] = kernel.dilation()[i];
sh_out_tensor_stride[i] = out_tensor_stride[i];
}
__syncthreads();
auto sh_kernel = gpu_kernel_region<coordinate_type>(
kernel, sh_tensor_stride, sh_kernel_size, sh_dilation);
coordinate<coordinate_type> curr_coordinate(sh_tmp);
if (x < num_threads) {
// iterate over values
index_type out_index = x * volume;
// set bounds for the valid keys
for (uint32_t kernel_ind = 0; kernel_ind < volume; ++kernel_ind) {
sh_kernel.coordinate_at(
kernel_ind,
&p_in_coordinates[in_valid_row_index[x] * coordinate_size], sh_tmp);
// Creating generative conv transpose
if (kernel.is_transpose()) {
// initialize out coordinate
for (uint32_t i = 0; i < coordinate_size; ++i)
p_out_coordinates[out_index * coordinate_size + i] =
curr_coordinate[i];
auto const result = out_map.insert(thrust::make_pair(
coordinate<coordinate_type>{
&p_out_coordinates[out_index * coordinate_size]},
out_index));
if (result.second) {
// row index in the out_coordinates
out_valid_row_index[out_index] = out_index;
// offset in the coordinate map
out_valid_map_index[out_index] = result.first.offset();
} else {
out_valid_row_index[out_index] = unused_key;
}
++out_index;
} else {
// skip if the coordinate is not aligned
if (!is_coordinate_aligned(sh_tmp, sh_out_tensor_stride,
coordinate_size)) {
out_valid_row_index[out_index] = unused_key;
++out_index;
} else {
// initialize out coordinate
for (uint32_t i = 0; i < coordinate_size; ++i)
p_out_coordinates[out_index * coordinate_size + i] =
curr_coordinate[i];
auto const result = out_map.insert(thrust::make_pair(
coordinate<coordinate_type>{
&p_out_coordinates[out_index * coordinate_size]},
out_index));
if (result.second) {
// row index in the out_coordinates
out_valid_row_index[out_index] = out_index;
// offset in the coordinate map
out_valid_map_index[out_index] = result.first.offset();
} else {
out_valid_row_index[out_index] = unused_key;
}
++out_index;
}
}
}
}
}
} // namespace detail
/*
 * @brief Generate a region-strided coordinate map by enumerating all kernel
 * offsets around each input coordinate (used e.g. for generative transposed
 * convolution).
 *
 * @return a CoordinateMapGPU with the given out_tensor_stride
 */
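/*
 * Illustrative note (not part of the original source): for every input
 * coordinate, kernel_region_insert enumerates all kernel.volume() offsets
 * (e.g. 9 offsets for a 3x3 kernel in 2D). In the transposed case every
 * generated coordinate is inserted; otherwise only coordinates aligned with
 * out_tensor_stride are kept, and failed or skipped insertions are compacted
 * away afterwards.
 */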
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::stride_region(
cpu_kernel_region<coordinate_type> &kernel,
stride_type const &out_tensor_stride) const {
ASSERT(m_coordinate_size == kernel.coordinate_size(),
"Invalid kernel coordinate_size");
gpu_kernel_region<coordinate_type> gpu_kernel(kernel.to_gpu());
  // Overestimate the reserve size as size().
size_type const N_in = size();
size_type const N_out = N_in * kernel.volume();
LOG_DEBUG("Stride region out tensor stride:", out_tensor_stride,
"with capacity:", N_out);
self_type stride_map(N_out, m_coordinate_size, m_hashtable_occupancy,
out_tensor_stride, m_map_allocator,
base_type::m_byte_allocator);
index_storage_type d_out_tensor_stride(out_tensor_stride);
auto &out_valid_row_index = stride_map.m_valid_row_index;
auto &out_valid_map_index = stride_map.m_valid_map_index;
out_valid_row_index.resize(N_out);
out_valid_map_index.resize(N_out);
index_type const unused_key = std::numeric_limits<index_type>::max();
  // (THREAD * D + 4 * D) * 4
  uint32_t const shared_memory_size_in_bytes =
      4 * m_coordinate_size * sizeof(index_type) + // stride, kernel, dilation,
                                                   // out tensor stride
      CUDA_NUM_THREADS * m_coordinate_size * sizeof(coordinate_type); // tmp
detail::kernel_region_insert<coordinate_type, size_type, index_type, map_type>
<<<GET_BLOCKS(N_in, CUDA_NUM_THREADS), CUDA_NUM_THREADS,
shared_memory_size_in_bytes>>>(N_in, //
*stride_map.m_map, //
const_coordinate_data(), //
m_valid_row_index.cbegin(), //
stride_map.coordinate_data(), //
out_valid_row_index.data(), //
out_valid_map_index.data(), //
gpu_kernel, //
d_out_tensor_stride.cbegin(), //
unused_key); //
CUDA_CHECK(cudaStreamSynchronize(0));
LOG_DEBUG("kernel_region_insert done");
// LOG_DEBUG("valid row index", out_valid_row_index);
// LOG_DEBUG("valid map offset", out_valid_map_index);
// remove unused_keys
auto valid_begin = thrust::make_zip_iterator(
thrust::make_tuple(out_valid_row_index.begin(), //
out_valid_map_index.begin()));
size_type const number_of_valid =
thrust::remove_if(thrust::device, //
valid_begin, //
thrust::make_zip_iterator(
thrust::make_tuple(out_valid_row_index.end(), //
out_valid_map_index.end())),
detail::is_first<index_type>(unused_key)) -
valid_begin;
out_valid_row_index.resize(number_of_valid);
out_valid_map_index.resize(number_of_valid);
stride_map.m_size = number_of_valid;
LOG_DEBUG("Reduced to", number_of_valid);
// remap values
thrust::counting_iterator<index_type> count_begin{0};
thrust::for_each(count_begin, count_begin + number_of_valid,
detail::update_value_with_offset<index_type, map_type>{
*stride_map.m_map, out_valid_map_index.data()});
LOG_DEBUG("Stride remap done");
return stride_map;
}
namespace detail {
template <typename dst_coordinate_type, typename src_coordinate_type,
typename size_type, typename index_type, bool stride_src>
__global__ void copy_column_with_valid(
dst_coordinate_type *__restrict__ dst_coordinates, //
size_type const num_threads, //
src_coordinate_type const *__restrict__ src_coordinates, //
index_type const *__restrict__ src_valid_row_index, //
size_type const coordinate_size) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
if (stride_src)
dst_coordinates[x] =
src_coordinates[src_valid_row_index[x] * coordinate_size];
else
dst_coordinates[x * coordinate_size] =
src_coordinates[src_valid_row_index[x]];
}
}
template <typename dst_coordinate_type, typename src_coordinate_type,
typename size_type, bool stride_src>
__global__ void
copy_column(dst_coordinate_type *__restrict__ dst_coordinates, //
size_type const num_threads, //
src_coordinate_type const *__restrict__ src_coordinates, //
size_type const coordinate_size) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
if (stride_src)
dst_coordinates[x] = src_coordinates[x * coordinate_size];
else
dst_coordinates[x * coordinate_size] = src_coordinates[x];
}
}
} // namespace detail
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::origin() const {
size_type const N = size();
LOG_DEBUG("Origin map from in map size:", N);
// tensor stride is set to {0,..., 0} for the origin map.
stride_type origin_tensor_stride(m_coordinate_size - 1);
std::for_each(origin_tensor_stride.begin(), origin_tensor_stride.end(),
[](auto &i) { i = 0; });
// thrust unique for unique batch index
coordinate_type *d_batch_indices = reinterpret_cast<coordinate_type *>(
m_byte_allocator.allocate(N * sizeof(coordinate_type)));
detail::copy_column_with_valid<coordinate_type, coordinate_type, size_type,
index_type, true>
<<<GET_BLOCKS(N, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>(
d_batch_indices, N, const_coordinate_data(),
m_valid_row_index.cbegin(), m_coordinate_size);
#ifdef DEBUG
CUDA_CHECK(cudaStreamSynchronize(0));
LOG_DEBUG("copied batch indices");
#endif
// Sort and unique
thrust::sort(thrust::device, d_batch_indices, d_batch_indices + N);
#ifdef DEBUG
CUDA_CHECK(cudaStreamSynchronize(0));
LOG_DEBUG("sorted batch indices");
#endif
auto d_batch_indices_end =
thrust::unique(thrust::device, d_batch_indices, d_batch_indices + N);
size_type const N_unique = d_batch_indices_end - d_batch_indices;
#ifdef DEBUG
size_t Nsize = std::min<int>(N_unique, 100);
std::vector<coordinate_type> tmp(Nsize);
CUDA_CHECK(cudaMemcpy(tmp.data(), d_batch_indices,
Nsize * sizeof(coordinate_type),
cudaMemcpyDeviceToHost));
LOG_DEBUG("sort and unique batch", tmp);
CUDA_CHECK(cudaStreamSynchronize(0));
LOG_DEBUG("unique done");
#endif
// Create origin map
LOG_DEBUG("Origin map with size:", N_unique,
" tensor stride:", origin_tensor_stride);
self_type origin_map(N_unique, m_coordinate_size, m_hashtable_occupancy,
origin_tensor_stride, m_map_allocator,
base_type::m_byte_allocator);
CUDA_CHECK(
cudaMemset(origin_map.coordinate_data(), 0,
N_unique * m_coordinate_size * sizeof(coordinate_type)));
detail::copy_column<coordinate_type, coordinate_type, size_type, false>
<<<GET_BLOCKS(N_unique, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>(
origin_map.coordinate_data(), N_unique, d_batch_indices,
m_coordinate_size);
#ifdef DEBUG
CUDA_CHECK(cudaStreamSynchronize(0));
LOG_DEBUG("copied batch indices to the origin_map");
#endif
auto &origin_valid_row_index = origin_map.m_valid_row_index;
auto &origin_valid_map_index = origin_map.m_valid_map_index;
origin_valid_row_index.resize(N_unique);
origin_valid_map_index.resize(N_unique);
origin_map.m_size = N_unique;
// Insert coordinates
auto insert = detail::insert_coordinate<coordinate_type, map_type,
index_type *>{
*origin_map.m_map, // map
origin_map.const_coordinate_data(), // coordinates,
origin_valid_row_index.data(), // valid row
origin_valid_map_index.data(), // iter offset
m_coordinate_size};
thrust::counting_iterator<uint32_t> count_begin{0};
thrust::for_each(thrust::device, count_begin, count_begin + N_unique, insert);
#ifdef DEBUG
CUDA_CHECK(cudaStreamSynchronize(0));
LOG_DEBUG("origin map insertion");
#endif
m_byte_allocator.deallocate((char *)d_batch_indices,
N * sizeof(coordinate_type));
return origin_map;
}
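/*
 * Illustrative example (not part of the original source): origin() collapses
 * every coordinate to its batch index. If the map holds the rows (0, 2, 4),
 * (0, 6, 8) and (1, 0, 0), the returned origin map contains the two
 * coordinates (0, 0, 0) and (1, 0, 0) with tensor stride {0, 0}.
 */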
template <typename coordinate_type, typename coordinate_int_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_int_type, TemplatedAllocator>
CoordinateFieldMapGPU<coordinate_type, coordinate_int_type,
TemplatedAllocator>::origin() const {
size_type const N = size();
LOG_DEBUG("Origin map from in map size:", N);
// tensor stride is set to {0,..., 0} for the origin map.
stride_type origin_tensor_stride(m_coordinate_size - 1);
std::for_each(origin_tensor_stride.begin(), origin_tensor_stride.end(),
[](auto &i) { i = 0; });
// thrust unique for unique batch index
coordinate_int_type *d_batch_indices =
reinterpret_cast<coordinate_int_type *>(
m_byte_allocator.allocate(N * sizeof(coordinate_int_type)));
detail::copy_column<coordinate_int_type, coordinate_type, size_type, true>
<<<GET_BLOCKS(N, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>(
d_batch_indices, N, const_coordinate_data(), m_coordinate_size);
// Sort and unique
thrust::sort(thrust::device, d_batch_indices, d_batch_indices + N);
auto d_batch_indices_end =
thrust::unique(thrust::device, d_batch_indices, d_batch_indices + N);
size_type const N_unique = d_batch_indices_end - d_batch_indices;
// Create origin map
LOG_DEBUG("Origin map with size:", N_unique,
" tensor stride:", origin_tensor_stride);
CoordinateMapGPU<coordinate_int_type, TemplatedAllocator> origin_map(
N_unique, m_coordinate_size, 50, origin_tensor_stride);
CUDA_CHECK(
cudaMemset(origin_map.coordinate_data(), 0,
N_unique * m_coordinate_size * sizeof(coordinate_int_type)));
detail::copy_column<coordinate_int_type, coordinate_int_type, size_type,
false>
<<<GET_BLOCKS(N_unique, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>(
origin_map.coordinate_data(), N_unique, d_batch_indices,
m_coordinate_size);
  m_byte_allocator.deallocate((char *)d_batch_indices,
                              N * sizeof(coordinate_int_type));
origin_map.initialize_valid_indices(N_unique);
return origin_map;
}
namespace detail {
template <typename coordinate_field_type, //
typename coordinate_int_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void origin_field_map_kernel(
size_type const num_threads, //
coordinate_field_type const *__restrict__ d_field_coords, //
map_type const __restrict__ origin_map, //
index_type *__restrict__ p_in_maps, //
index_type *__restrict__ p_out_maps, //
index_type *__restrict__ p_kernels, //
size_type const coordinate_size) {
extern __shared__ coordinate_int_type sh_all[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
// clang-format off
coordinate_int_type *sh_tmp = sh_all + tx * coordinate_size;
// clang-format on
if (x < num_threads)
for (index_type i = 0; i < coordinate_size; ++i)
sh_tmp[i] = 0;
__syncthreads();
if (x < num_threads) {
sh_tmp[0] =
coordinate_int_type(lroundf(d_field_coords[x * coordinate_size]));
auto origin_iter = origin_map.find(coordinate<coordinate_int_type>(sh_tmp));
auto out_index = origin_iter->second;
p_in_maps[x] = x;
p_out_maps[x] = out_index; // origin_map row index
// For kernel_map decompose()
p_kernels[x] = out_index;
}
}
} // namespace detail
template <typename coordinate_field_type, typename coordinate_int_type,
template <typename T> class TemplatedAllocator>
CoordinateFieldMapGPU<coordinate_field_type, coordinate_int_type,
TemplatedAllocator>::kernel_map_type
CoordinateFieldMapGPU<coordinate_field_type, coordinate_int_type,
TemplatedAllocator>::
origin_map(CoordinateMapGPU<coordinate_int_type, TemplatedAllocator> const
&origin_map,
uint32_t thread_dim) const {
ASSERT(std::all_of(origin_map.get_tensor_stride().begin(),
origin_map.get_tensor_stride().end(),
[](auto const &i) { return i == 0; }),
"Invalid origin tensor stride", origin_map.get_tensor_stride());
// reserve size();
size_type const in_size = size();
LOG_DEBUG("in_map size:", in_size, "origin_map size:", origin_map.size());
// (THREAD * D) * 4
uint32_t const shared_memory_size_in_bytes =
thread_dim * m_coordinate_size * sizeof(coordinate_int_type); // tmp
size_type const num_threads = in_size;
auto const num_blocks = GET_BLOCKS(num_threads, thread_dim);
LOG_DEBUG("origin_map num block", num_blocks);
LOG_DEBUG("origin_map shared_memory size", shared_memory_size_in_bytes);
LOG_DEBUG("origin_map threads dim", thread_dim);
LOG_DEBUG("origin_map num threads", num_threads);
kernel_map_type kernel_map(in_size, base_type::m_byte_allocator);
CUDA_CHECK(cudaStreamSynchronize(0));
LOG_DEBUG("Allocated kernel_map.");
detail::origin_field_map_kernel<coordinate_field_type, coordinate_int_type,
size_type, index_type, int_hash_map_type>
<<<num_blocks, thread_dim, shared_memory_size_in_bytes>>>(
num_threads, //
const_coordinate_data(), //
origin_map.const_hash_map(), //
kernel_map.in_maps.begin(), //
kernel_map.out_maps.begin(), //
kernel_map.kernels.begin(), //
m_coordinate_size);
CUDA_CHECK(cudaStreamSynchronize(0));
THRUST_CHECK(kernel_map.decompose());
LOG_DEBUG("origin map decomposed");
return kernel_map;
}
namespace detail {
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void prune_copy_and_insert(
size_type const num_threads, //
size_type const coordinate_size, //
index_type const unused_map_offset, //
index_type const *const __restrict__ in_valid_row_index, //
coordinate_type const *const __restrict__ in_coordinates, //
bool const *const __restrict__ keep_begin, //
index_type const *const __restrict__ inclusive_scan_keep, //
map_type __restrict__ out_map, //
coordinate_type *__restrict__ out_coordinates, //
index_type *__restrict__ out_valid_row_index, //
index_type *__restrict__ out_valid_map_offset //
) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
if (!keep_begin[x]) {
out_valid_map_offset[x] = unused_map_offset;
} else {
// If keep,
auto out_row_index = (x < 1) ? 0 : inclusive_scan_keep[x - 1];
coordinate_type const *curr_in_coord =
&in_coordinates[in_valid_row_index[x] * coordinate_size];
coordinate_type *curr_out_coord =
&out_coordinates[out_row_index * coordinate_size];
for (index_type i = 0; i < coordinate_size; ++i)
curr_out_coord[i] = curr_in_coord[i];
// insert to the out_map
auto coord = coordinate<coordinate_type>{curr_out_coord};
// remap the value in the next kernel call
auto result = out_map.insert(thrust::make_pair(coord, 0));
out_valid_row_index[x] = out_row_index;
if (result.second)
out_valid_map_offset[x] = result.first.offset();
else
out_valid_map_offset[x] = unused_map_offset;
}
}
}
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void remap(size_type const num_threads, //
map_type const __restrict__ out_map, //
index_type *__restrict__ out_valid_map_offset //
) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
auto &pair = out_map.data()[out_valid_map_offset[x]];
pair.second = x;
}
}
template <typename Dtype, typename Stype>
__global__ void typed_copy(uint32_t const num_threads, //
Dtype *__restrict__ dst, //
Stype const *__restrict__ src //
) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
dst[x] = src[x];
}
}
} // namespace detail
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::prune(
bool const *keep_begin, bool const *keep_end) const {
size_type const N = size();
ASSERT(N == keep_end - keep_begin, "Invalid keep size");
LOG_DEBUG("Prune size:", N);
// exclusive sum for coordinate copy.
auto const inclusive_scan_size = N * sizeof(index_type);
index_type *d_inclusive_scan =
(index_type *)m_byte_allocator.allocate(inclusive_scan_size);
// bool -> index_type
detail::typed_copy<<<GET_BLOCKS(N, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>(
N, d_inclusive_scan, keep_begin);
CUDA_CHECK(cudaStreamSynchronize(0));
thrust::inclusive_scan(thrust::device, d_inclusive_scan, d_inclusive_scan + N,
d_inclusive_scan);
index_type N_pruned;
CUDA_CHECK(cudaMemcpy(&N_pruned, d_inclusive_scan + N - 1, sizeof(index_type),
cudaMemcpyDeviceToHost));
LOG_DEBUG("Pruned N:", N_pruned);
// create a coordinate_map
self_type pruned_map(N, m_coordinate_size, m_hashtable_occupancy,
base_type::m_tensor_stride, m_map_allocator,
base_type::m_byte_allocator);
// Copy and insert kernel that first checks keep[i] is true and insert at
// inclusive_scan[i - 1].
auto &out_valid_map_offset = pruned_map.m_valid_map_index;
auto &out_valid_row_index = pruned_map.m_valid_row_index;
out_valid_map_offset.resize(N);
out_valid_row_index.resize(N);
index_type const unused_map_offset = std::numeric_limits<index_type>::max();
detail::prune_copy_and_insert<coordinate_type, size_type, index_type,
map_type>
<<<GET_BLOCKS(N, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>(
N, m_coordinate_size, unused_map_offset, m_valid_row_index.cbegin(),
const_coordinate_data(), keep_begin, d_inclusive_scan,
*(pruned_map.m_map), pruned_map.coordinate_data(),
out_valid_row_index.data(), out_valid_map_offset.data());
CUDA_CHECK(cudaStreamSynchronize(0));
LOG_DEBUG("Pruned hash map size:", pruned_map.size());
// Remove not inserted rows
auto valid_begin = thrust::make_zip_iterator(thrust::make_tuple(
out_valid_map_offset.begin(), out_valid_row_index.begin()));
size_type const number_of_valid =
thrust::remove_if(
thrust::device, valid_begin,
thrust::make_zip_iterator(thrust::make_tuple(
out_valid_map_offset.end(), out_valid_row_index.end())),
detail::is_first<index_type>(unused_map_offset)) -
valid_begin;
LOG_DEBUG("number of valid rows:", number_of_valid);
out_valid_map_offset.resize(number_of_valid);
out_valid_row_index.resize(number_of_valid);
pruned_map.m_size = number_of_valid;
// remap the final map values
detail::remap<coordinate_type, size_type, index_type, map_type>
<<<GET_BLOCKS(number_of_valid, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>(
number_of_valid, *(pruned_map.m_map), out_valid_map_offset.data());
CUDA_CHECK(cudaStreamSynchronize(0));
m_byte_allocator.deallocate((char *)d_inclusive_scan, inclusive_scan_size);
return pruned_map;
}
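/*
 * A hypothetical usage sketch (not from the original source). `d_keep` is
 * assumed to be a device array of bool with one entry per valid row:
 *
 *   auto pruned = map.prune(d_keep, d_keep + map.size());
 *   // pruned keeps only the rows whose flag was true, re-inserted into a
 *   // fresh hash map and remapped to contiguous row indices.
 */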
// Merge
namespace detail {
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void
copy_coordinates_by_offset(map_type __restrict__ map, //
coordinate_type *__restrict__ coordinates, //
index_type const *__restrict__ map_offsets, //
size_type const num_threads, //
size_type const coordinate_size //
) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
typename map_type::value_type const *p_value = map.data() + map_offsets[x];
// Compute Capabilities 3.5 or newer
coordinate_type *dst_coordinate =
coordinates + p_value->second * coordinate_size;
for (index_type i = 0; i < coordinate_size; ++i)
dst_coordinate[i] = p_value->first[i];
}
}
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void copy_coordinates_by_valid_row(
// map_type __restrict__ map, //
coordinate_type const *__restrict__ in_coordinates, //
coordinate_type *__restrict__ out_coordinates, //
index_type const *__restrict__ valid_row, //
size_type const num_threads, //
size_type const coordinate_size //
) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
// Compute Capabilities 3.5 or newer
index_type const row_index = x / coordinate_size;
index_type const col_index = x % coordinate_size;
out_coordinates[row_index * coordinate_size + col_index] =
in_coordinates[valid_row[row_index] * coordinate_size + col_index];
}
}
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void insert_and_map_kernel_with_offset(
map_type __restrict__ map, //
coordinate_type const *__restrict__ coordinates, //
index_type const coordinate_row_offset, //
index_type *__restrict__ valid_map_index, //
index_type *__restrict__ valid_row_index, //
size_type const num_threads, //
size_type const coordinate_size, //
index_type const unused_key) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
// m_map.insert(pair);
// Returns pair<iterator, (bool)insert_success>
auto const result = map.insert(thrust::make_pair(
coordinate<coordinate_type>{&coordinates[x * coordinate_size]}, x));
if (result.second) {
valid_row_index[x] = x + coordinate_row_offset;
      // map offset of the successful insertion; failed insertions are
      // filtered out after the kernel completes.
valid_map_index[x] = result.first.offset();
} else {
valid_map_index[x] = unused_key;
}
}
}
} // namespace detail
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::merge(
std::vector<std::reference_wrapper<self_type>> const &maps) const {
// reserve size
  size_t all_size = std::accumulate(
      maps.begin(), maps.end(), size_t{0},
      [](size_t sum, const self_type &map) { return sum + map.size(); });
LOG_DEBUG("Out merge map capacity:", all_size);
self_type merged_map(all_size, m_coordinate_size, m_hashtable_occupancy,
base_type::m_tensor_stride, m_map_allocator,
base_type::m_byte_allocator);
merged_map.m_valid_row_index.resize(all_size);
merged_map.m_valid_map_index.resize(all_size);
// Copy valid coordinates to the merged map
coordinate_type *curr_coordinates = merged_map.coordinate_data();
index_type *curr_valid_map_offset = merged_map.m_valid_map_index.data();
index_type *curr_valid_row_index = merged_map.m_valid_row_index.data();
index_type const unused_key = std::numeric_limits<index_type>::max();
index_type row_offset{0};
for (self_type const &map : maps) {
size_type const num_threads = map.size();
if (num_threads == 0)
continue;
size_type const num_blocks =
GET_BLOCKS(num_threads * m_coordinate_size, CUDA_NUM_THREADS);
LOG_DEBUG("Current merge map size:", num_threads);
detail::copy_coordinates_by_valid_row<coordinate_type, size_type,
index_type, map_type>
<<<num_blocks, CUDA_NUM_THREADS>>>(map.const_coordinate_data(), //
curr_coordinates, //
map.m_valid_row_index.cdata(), //
num_threads * m_coordinate_size, //
m_coordinate_size);
detail::insert_and_map_kernel_with_offset<coordinate_type, size_type,
index_type, map_type>
<<<num_blocks, CUDA_NUM_THREADS>>>(*(merged_map.m_map),
curr_coordinates, //
row_offset, //
curr_valid_map_offset, //
curr_valid_row_index, //
num_threads, m_coordinate_size,
unused_key);
CUDA_CHECK(cudaStreamSynchronize(0));
curr_coordinates += num_threads * m_coordinate_size;
curr_valid_map_offset += num_threads;
curr_valid_row_index += num_threads;
row_offset += num_threads;
}
// Remove invalid maps
auto valid_begin = thrust::make_zip_iterator(
thrust::make_tuple(merged_map.m_valid_map_index.begin(),
merged_map.m_valid_row_index.begin()));
size_type const number_of_valid =
thrust::remove_if(thrust::device, valid_begin,
thrust::make_zip_iterator(thrust::make_tuple(
merged_map.m_valid_map_index.end(),
merged_map.m_valid_row_index.end())),
detail::is_first<index_type>(unused_key)) -
valid_begin;
// remap the final map row index and the map offset
detail::remap<coordinate_type, size_type, index_type, map_type>
<<<GET_BLOCKS(number_of_valid, CUDA_NUM_THREADS), CUDA_NUM_THREADS>>>(
number_of_valid, *(merged_map.m_map),
merged_map.m_valid_map_index.data());
merged_map.m_valid_row_index.resize(number_of_valid);
merged_map.m_valid_map_index.resize(number_of_valid);
merged_map.m_size = number_of_valid;
return merged_map;
}
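/*
 * A hypothetical usage sketch (not from the original source). All maps are
 * assumed to share the same coordinate_size and tensor stride:
 *
 *   std::vector<std::reference_wrapper<decltype(map_a)>> maps{map_a, map_b};
 *   auto merged = map_a.merge(maps);
 *   // duplicate coordinates across map_a and map_b are collapsed; the valid
 *   // row indices refer to the concatenated coordinate order.
 */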
namespace detail {
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void
count_kernel(map_type const __restrict__ in_map, //
map_type const __restrict__ out_map, //
index_type const *const __restrict__ out_valid_map_index, //
size_type const num_threads, //
gpu_kernel_region<coordinate_type> kernel, //
index_type *__restrict__ p_count_per_thread) {
extern __shared__ coordinate_type sh_all[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
size_type const coordinate_size = kernel.coordinate_size();
size_type const volume = kernel.volume();
// clang-format off
size_type *sh_size = reinterpret_cast<size_type *>(sh_all);
size_type *sh_tensor_stride = sh_size;
size_type *sh_kernel_size = sh_tensor_stride + coordinate_size;
size_type *sh_dilation = sh_kernel_size + coordinate_size;
coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_dilation + coordinate_size);
coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
// clang-format on
auto const equal = out_map.get_key_equal();
// kernel_maps
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
sh_tensor_stride[i] = kernel.tensor_stride()[i];
sh_kernel_size[i] = kernel.kernel_size()[i];
sh_dilation[i] = kernel.dilation()[i];
}
__syncthreads();
auto sh_kernel = gpu_kernel_region<coordinate_type>(
kernel, sh_tensor_stride, sh_kernel_size, sh_dilation);
coordinate<coordinate_type> point(sh_tmp);
auto const unused_key = out_map.get_unused_key();
if (x < num_threads) {
size_type count = 0;
typename map_type::value_type const &out_value =
out_map.data()[out_valid_map_index[x]];
// valid_index guarantees that it contains a valid value
if (!equal(out_value.first, unused_key)) {
for (auto kernel_ind = 0; kernel_ind < volume; ++kernel_ind) {
sh_kernel.coordinate_at(kernel_ind, out_value.first.data(), sh_tmp);
if (in_map.find(point) != in_map.end()) {
++count;
}
}
}
p_count_per_thread[x] = count;
}
}
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void preallocated_kernel_map_iteration(
map_type const __restrict__ in_map, //
map_type const __restrict__ out_map, //
index_type const *const __restrict__ out_valid_map_index, //
size_type const num_threads, //
gpu_kernel_region<coordinate_type> kernel, //
index_type const *const __restrict__ inclusive_count_cumsum_per_thread, //
index_type *__restrict__ p_kernels, //
index_type *__restrict__ p_in_maps, //
index_type *__restrict__ p_out_maps) {
extern __shared__ coordinate_type sh_all[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
size_type const coordinate_size = kernel.coordinate_size();
size_type const volume = kernel.volume();
// clang-format off
size_type *sh_size = reinterpret_cast<size_type *>(sh_all);
size_type *sh_tensor_stride = sh_size;
size_type *sh_kernel_size = sh_tensor_stride + coordinate_size;
size_type *sh_dilation = sh_kernel_size + coordinate_size;
coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_dilation + coordinate_size);
coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
// clang-format on
auto const equal = out_map.get_key_equal();
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
sh_tensor_stride[i] = kernel.tensor_stride()[i];
sh_kernel_size[i] = kernel.kernel_size()[i];
sh_dilation[i] = kernel.dilation()[i];
}
__syncthreads();
auto sh_kernel = gpu_kernel_region<coordinate_type>(
kernel, sh_tensor_stride, sh_kernel_size, sh_dilation);
coordinate<coordinate_type> curr_coordinate(sh_tmp);
auto const unused_key = out_map.get_unused_key();
if (x < num_threads) {
// iterate over values
auto kernel_map_index =
(x < 1) ? 0 : inclusive_count_cumsum_per_thread[x - 1];
typename map_type::value_type const &out_value =
out_map.data()[out_valid_map_index[x]];
if (!equal(out_value.first, unused_key)) {
// set bounds for the valid keys
for (uint32_t kernel_index = 0; kernel_index < volume; ++kernel_index) {
sh_kernel.coordinate_at(kernel_index, out_value.first.data(), sh_tmp);
auto const &in_result = in_map.find(curr_coordinate);
if (in_result != in_map.end()) {
// insert to
p_kernels[kernel_map_index] = kernel_index;
p_in_maps[kernel_map_index] = (*in_result).second;
p_out_maps[kernel_map_index] = out_value.second;
++kernel_map_index;
}
}
}
}
}
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void
direct_in_out_map(size_type const num_threads, //
map_type const __restrict__ in_map, //
map_type const __restrict__ out_map, //
index_type const *const __restrict__ out_valid_map_offset, //
index_type *__restrict__ p_in_maps, //
index_type *__restrict__ p_out_maps,
index_type const unused_key) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
typename map_type::value_type const &out_value =
out_map.data()[out_valid_map_offset[x]];
auto const &result = in_map.find(out_value.first);
if (result != in_map.end()) {
p_in_maps[x] = (*result).second;
p_out_maps[x] = out_value.second;
} else {
p_in_maps[x] = unused_key;
}
}
}
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void
direct_kernel_map(map_type const __restrict__ in_map, //
map_type const __restrict__ out_map, //
index_type const *const __restrict__ out_valid_map_index, //
size_type const num_threads, //
gpu_kernel_region<coordinate_type> kernel, //
index_type *__restrict__ p_kernels, //
index_type *__restrict__ p_in_maps, //
index_type *__restrict__ p_out_maps,
index_type const unused_map_value) {
extern __shared__ coordinate_type sh_all[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
size_type const coordinate_size = kernel.coordinate_size();
size_type const volume = kernel.volume();
// clang-format off
size_type *sh_size = reinterpret_cast<size_type *>(sh_all);
size_type *sh_tensor_stride = sh_size;
size_type *sh_kernel_size = sh_tensor_stride + coordinate_size;
size_type *sh_dilation = sh_kernel_size + coordinate_size;
coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_dilation + coordinate_size);
coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
// clang-format on
auto const equal = out_map.get_key_equal();
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
sh_tensor_stride[i] = kernel.tensor_stride()[i];
sh_kernel_size[i] = kernel.kernel_size()[i];
sh_dilation[i] = kernel.dilation()[i];
}
__syncthreads();
auto sh_kernel = gpu_kernel_region<coordinate_type>(
kernel, sh_tensor_stride, sh_kernel_size, sh_dilation);
auto const unused_key = out_map.get_unused_key();
if (x < num_threads) {
// iterate over values
index_type kernel_index = x % volume;
typename map_type::value_type const &out_value =
out_map.data()[out_valid_map_index[x / volume]];
if (!equal(out_value.first, unused_key)) {
// set bounds for the valid keys
// TODO: copy the curr_coordinate to sh_curr_coordinate
sh_kernel.coordinate_at(kernel_index, out_value.first.data(), sh_tmp);
auto const &in_result = in_map.find(coordinate<coordinate_type>(sh_tmp));
if (in_result != in_map.end()) {
// insert to
p_kernels[x] = kernel_index;
p_in_maps[x] = (*in_result).second;
p_out_maps[x] = out_value.second;
} else {
p_kernels[x] = unused_map_value;
}
}
}
}
} // namespace detail
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::kernel_map_type
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::kernel_map(
self_type const &out_map, gpu_kernel_region<coordinate_type> const &kernel,
CUDAKernelMapMode::Mode kernel_map_mode, uint32_t thread_dim) const {
  // Overestimate the reserve size as size().
size_type const out_size = out_map.size();
size_type const kernel_volume = kernel.volume();
ASSERT(kernel_volume > 0, "Invalid kernel");
if (kernel_volume == 1) {
    // kernel volume 1: map each output coordinate directly to the input
    // coordinate with the same key.
auto const N = out_size;
LOG_DEBUG("out_map size:", N);
index_type *in_out_map = (index_type *)base_type::m_byte_allocator.allocate(
2 * (N + 1) * sizeof(index_type));
index_type *ins = in_out_map;
index_type *outs =
in_out_map + N + 1; // for __restrict__ collision prevention
index_type unused_key = std::numeric_limits<index_type>::max();
detail::direct_in_out_map<coordinate_type, size_type, index_type, map_type>
<<<GET_BLOCKS(N, thread_dim), thread_dim>>>(
N, *m_map, //
*(out_map.m_map), //
out_map.m_valid_map_index.cdata(), //
ins, // in map
outs, // out map
unused_key);
LOG_DEBUG("Direct in out map copy done");
auto begin = thrust::make_zip_iterator(thrust::make_tuple(ins, outs));
auto const valid_size =
thrust::remove_if(
thrust::device, begin,
thrust::make_zip_iterator(thrust::make_tuple(ins + N, outs + N)),
detail::is_first<index_type>(unused_key)) -
begin;
LOG_DEBUG("Valid size:", valid_size);
kernel_map_type kernel_map(valid_size, base_type::m_byte_allocator, false);
CUDA_CHECK(cudaMemcpy(kernel_map.in_maps.data(), ins,
valid_size * sizeof(index_type),
cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(kernel_map.out_maps.data(), outs,
valid_size * sizeof(index_type),
cudaMemcpyDeviceToDevice));
base_type::m_byte_allocator.deallocate((char *)in_out_map,
2 * (N + 1) * sizeof(index_type));
LOG_DEBUG("Cleaning up");
return kernel_map;
} else if (kernel_map_mode == CUDAKernelMapMode::MEMORY_EFFICIENT &&
kernel.region_type() != RegionType::CUSTOM) {
// (THREAD * D + 3 * D) * 4
uint32_t const shared_memory_size_in_bytes =
3 * m_coordinate_size * sizeof(index_type) + // stride, kernel, dilation
thread_dim * m_coordinate_size * sizeof(coordinate_type); // tmp
// clang-format on
size_type const num_threads = out_size;
auto const num_blocks = GET_BLOCKS(num_threads, thread_dim);
LOG_DEBUG("num block", num_blocks);
LOG_DEBUG("out_map size", out_map.size());
LOG_DEBUG("shared_memory size", shared_memory_size_in_bytes);
LOG_DEBUG("threads dim", thread_dim);
LOG_DEBUG("num threads", num_threads);
index_type *d_p_count_per_thread = reinterpret_cast<index_type *>(
base_type::m_byte_allocator.allocate(num_threads * sizeof(index_type)));
// Initialize count per thread
detail::count_kernel<coordinate_type, size_type, index_type, map_type>
<<<num_blocks, thread_dim, shared_memory_size_in_bytes>>>(
*m_map, //
*out_map.m_map, //
out_map.m_valid_map_index.cbegin(), //
num_threads, //
kernel, //
d_p_count_per_thread);
CUDA_CHECK(cudaStreamSynchronize(0));
LOG_DEBUG("count_kernel finished");
thrust::inclusive_scan(thrust::device, d_p_count_per_thread,
d_p_count_per_thread + num_threads,
d_p_count_per_thread);
index_type num_kernel_map; // type following the kernel map allocator
CUDA_CHECK(cudaMemcpy(&num_kernel_map,
d_p_count_per_thread + num_threads - 1,
sizeof(index_type), cudaMemcpyDeviceToHost));
// set kernel map
LOG_DEBUG("Found", num_kernel_map, "kernel map elements.");
kernel_map_type kernel_map(num_kernel_map, base_type::m_byte_allocator);
CUDA_CHECK(cudaStreamSynchronize(0));
LOG_DEBUG("Allocated kernel_map.");
detail::preallocated_kernel_map_iteration<coordinate_type, size_type,
index_type, map_type>
<<<num_blocks, thread_dim, shared_memory_size_in_bytes>>>(
*m_map, //
*out_map.m_map, //
out_map.m_valid_map_index.cbegin(), //
num_threads, //
kernel, //
d_p_count_per_thread, //
kernel_map.kernels.begin(), //
kernel_map.in_maps.begin(), //
kernel_map.out_maps.begin());
CUDA_CHECK(cudaStreamSynchronize(0));
LOG_DEBUG("Preallocated kernel map done");
THRUST_CHECK(kernel_map.decompose());
base_type::m_byte_allocator.deallocate(
reinterpret_cast<char *>(d_p_count_per_thread),
num_threads * sizeof(index_type));
LOG_DEBUG("cudaFree");
return kernel_map;
} else if (kernel_map_mode == CUDAKernelMapMode::SPEED_OPTIMIZED &&
kernel.region_type() != RegionType::CUSTOM) {
// (THREAD * 3 * D + 3 * D) * 4
uint32_t const shared_memory_size_in_bytes =
3 * m_coordinate_size * sizeof(index_type) + // stride, kernel, dilation
(thread_dim + (thread_dim + kernel_volume - 1) / kernel_volume) *
m_coordinate_size *
sizeof(coordinate_type); // tmp coordinate + current coordinate
size_type const num_threads = out_size * kernel_volume;
auto const num_blocks = GET_BLOCKS(num_threads, thread_dim);
LOG_DEBUG("num block", num_blocks);
LOG_DEBUG("out_map size", out_map.size());
LOG_DEBUG("kernel_volume", kernel_volume);
LOG_DEBUG("shared_memory size", shared_memory_size_in_bytes);
LOG_DEBUG("threads dim", thread_dim);
LOG_DEBUG("num threads", num_threads);
index_type unused_map_value = std::numeric_limits<index_type>::max();
index_type *d_p_valid_in_index =
reinterpret_cast<index_type *>(base_type::m_byte_allocator.allocate(
3 * (num_threads + 1) * sizeof(index_type)));
index_type *d_p_valid_out_index = d_p_valid_in_index + num_threads + 1;
index_type *d_p_valid_kernel_index = d_p_valid_out_index + num_threads + 1;
// Initialize count per thread
detail::direct_kernel_map<coordinate_type, size_type, index_type, map_type>
<<<num_blocks, thread_dim, shared_memory_size_in_bytes>>>(
*m_map, //
*out_map.m_map, //
out_map.m_valid_map_index.cbegin(), //
num_threads, //
kernel, //
d_p_valid_kernel_index, //
d_p_valid_in_index, //
d_p_valid_out_index, //
unused_map_value);
CUDA_CHECK(cudaStreamSynchronize(0));
LOG_DEBUG("direct_kernel_map finished");
auto begin = thrust::make_zip_iterator(thrust::make_tuple(
d_p_valid_kernel_index, d_p_valid_in_index, d_p_valid_out_index));
auto const valid_size =
thrust::remove_if(thrust::device, begin,
thrust::make_zip_iterator(thrust::make_tuple(
d_p_valid_kernel_index + num_threads,
d_p_valid_in_index + num_threads,
d_p_valid_out_index + num_threads)),
detail::is_first<index_type>(unused_map_value)) -
begin;
LOG_DEBUG("Valid size:", valid_size);
kernel_map_type kernel_map(valid_size, base_type::m_byte_allocator);
CUDA_CHECK(cudaMemcpy(kernel_map.kernels.data(), d_p_valid_kernel_index,
valid_size * sizeof(index_type),
cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(kernel_map.in_maps.data(), d_p_valid_in_index,
valid_size * sizeof(index_type),
cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(kernel_map.out_maps.data(), d_p_valid_out_index,
valid_size * sizeof(index_type),
cudaMemcpyDeviceToDevice));
THRUST_CHECK(kernel_map.decompose());
base_type::m_byte_allocator.deallocate(
reinterpret_cast<char *>(d_p_valid_in_index),
3 * (num_threads + 1) * sizeof(index_type));
LOG_DEBUG("cudaFree");
return kernel_map;
  } else { // unsupported kernel map mode or region type
ASSERT(false, "Not implemented");
}
}
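/*
 * Illustrative note (not part of the original source): MEMORY_EFFICIENT first
 * counts, per output coordinate, how many kernel offsets hit an input
 * coordinate, allocates exactly that many kernel-map entries, and fills them
 * in a second pass. SPEED_OPTIMIZED allocates out_size * kernel_volume
 * candidate entries up front and compacts the misses away with remove_if.
 */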
namespace detail {
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void
stride_map_kernel(map_type const __restrict__ in_map, //
map_type const __restrict__ out_map, //
index_type const *const __restrict__ in_valid_map_index, //
size_type const num_threads, //
index_type const *const __restrict__ stride, //
index_type *__restrict__ p_in_maps, //
index_type *__restrict__ p_out_maps,
size_type const coordinate_size,
index_type const unused_key) {
extern __shared__ coordinate_type sh_all[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
// clang-format off
size_type *sh_size = reinterpret_cast<size_type *>(sh_all);
size_type *sh_stride = sh_size;
coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_size + coordinate_size);
coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
// clang-format on
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
sh_stride[i] = stride[i];
}
__syncthreads();
if (x >= num_threads)
return;
typename map_type::value_type const &in_value =
in_map.data()[in_valid_map_index[x]];
sh_tmp[0] = in_value.first[0];
for (index_type j = 1; j < coordinate_size; ++j) {
sh_tmp[j] =
(__float2int_rd(__fdiv_rd(in_value.first[j], sh_stride[j - 1]))) *
sh_stride[j - 1];
}
auto out_iter = out_map.find(coordinate<coordinate_type>(sh_tmp));
if (out_iter == out_map.end()) {
p_in_maps[x] = unused_key;
} else {
p_in_maps[x] = in_value.second;
p_out_maps[x] = out_iter->second;
}
}
} // namespace detail
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::kernel_map_type
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::stride_map(
self_type const &out_map, stride_type const &out_tensor_stride,
uint32_t thread_dim) const {
LOG_DEBUG("generating stride_map from stride", base_type::m_tensor_stride,
"to", out_map.get_tensor_stride());
// Over estimate the reserve size to be size();
size_type const in_size = size();
index_storage_type d_out_tensor_stride(out_tensor_stride);
index_type unused_key = std::numeric_limits<index_type>::max();
// (THREAD * D + D) * 4
uint32_t const shared_memory_size_in_bytes =
m_coordinate_size * sizeof(index_type) + // stride
thread_dim * m_coordinate_size * sizeof(coordinate_type); // tmp
size_type const num_threads = in_size;
auto const num_blocks = GET_BLOCKS(num_threads, thread_dim);
LOG_DEBUG("num block", num_blocks);
LOG_DEBUG("shared_memory size", shared_memory_size_in_bytes);
LOG_DEBUG("threads dim", thread_dim);
LOG_DEBUG("num threads", num_threads);
index_type *in_out_map = (index_type *)base_type::m_byte_allocator.allocate(
2 * (in_size + 1) * sizeof(index_type));
index_type *ins = in_out_map;
index_type *outs =
in_out_map + in_size + 1; // for __restrict__ collision prevention
LOG_DEBUG("Allocated temporary memory");
LOG_DEBUG("out_map size", out_map.size(),
"out tensor stride:", out_map.get_tensor_stride(),
"coordinate_size", m_coordinate_size);
detail::stride_map_kernel<coordinate_type, size_type, index_type, map_type>
<<<num_blocks, thread_dim, shared_memory_size_in_bytes>>>(
*m_map, //
*out_map.m_map, //
m_valid_map_index.cbegin(), //
num_threads, //
d_out_tensor_stride.cbegin(), //
ins, //
outs, //
m_coordinate_size, //
unused_key);
auto begin = thrust::make_zip_iterator(thrust::make_tuple(ins, outs));
auto const valid_size =
thrust::remove_if(thrust::device, begin,
thrust::make_zip_iterator(
thrust::make_tuple(ins + in_size, outs + in_size)),
detail::is_first<index_type>(unused_key)) -
begin;
LOG_DEBUG("Valid size:", valid_size);
kernel_map_type kernel_map(valid_size, base_type::m_byte_allocator, false);
CUDA_CHECK(cudaMemcpy(kernel_map.in_maps.data(), ins,
valid_size * sizeof(index_type),
cudaMemcpyDeviceToDevice));
CUDA_CHECK(cudaMemcpy(kernel_map.out_maps.data(), outs,
valid_size * sizeof(index_type),
cudaMemcpyDeviceToDevice));
base_type::m_byte_allocator.deallocate(
(char *)in_out_map, 2 * (in_size + 1) * sizeof(index_type));
return kernel_map;
}
namespace detail {
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename map_type>
__global__ void
origin_map_kernel(map_type const __restrict__ in_map, //
map_type const __restrict__ origin_map, //
index_type const *const __restrict__ in_valid_map_index, //
size_type const num_threads, //
index_type *__restrict__ p_in_maps, //
index_type *__restrict__ p_out_maps,
index_type *__restrict__ p_kernels,
size_type const coordinate_size) {
extern __shared__ coordinate_type sh_all[];
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
// clang-format off
coordinate_type *sh_tmp = sh_all + tx * coordinate_size;
// clang-format on
if (x < num_threads)
for (index_type i = 0; i < coordinate_size; ++i)
sh_tmp[i] = 0;
__syncthreads();
if (x < num_threads) {
typename map_type::value_type const &in_value =
in_map.data()[in_valid_map_index[x]];
sh_tmp[0] = in_value.first[0];
auto origin_iter = origin_map.find(coordinate<coordinate_type>(sh_tmp));
p_in_maps[x] = in_value.second;
p_out_maps[x] = origin_iter->second; // origin_map row index
// For kernel_map decompose()
p_kernels[x] = origin_iter->second;
}
}
} // namespace detail
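// origin_map: maps every valid coordinate of this map to the row of its batch
// origin in origin_map, i.e. the coordinate that keeps only the batch index
// and zeroes all spatial components.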
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::kernel_map_type
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::origin_map(
self_type const &origin_map, uint32_t thread_dim) const {
ASSERT(std::all_of(origin_map.get_tensor_stride().begin(),
origin_map.get_tensor_stride().end(),
[](auto const &i) { return i == 0; }),
"Invalid origin tensor stride", origin_map.get_tensor_stride());
// reserve size();
size_type const in_size = size();
LOG_DEBUG("in_map size:", in_size, "origin_map size:", origin_map.size());
// (THREAD * D) * 4
uint32_t const shared_memory_size_in_bytes =
thread_dim * m_coordinate_size * sizeof(coordinate_type); // tmp
size_type const num_threads = in_size;
auto const num_blocks = GET_BLOCKS(num_threads, thread_dim);
LOG_DEBUG("origin_map num block", num_blocks);
LOG_DEBUG("origin_map shared_memory size", shared_memory_size_in_bytes);
LOG_DEBUG("origin_map threads dim", thread_dim);
LOG_DEBUG("origin_map num threads", num_threads);
kernel_map_type kernel_map(in_size, base_type::m_byte_allocator);
CUDA_CHECK(cudaStreamSynchronize(0));
LOG_DEBUG("Allocated kernel_map.");
detail::origin_map_kernel<coordinate_type, size_type, index_type, map_type>
<<<num_blocks, thread_dim, shared_memory_size_in_bytes>>>(
*m_map, //
*origin_map.m_map, //
m_valid_map_index.cbegin(), //
num_threads, //
kernel_map.in_maps.begin(), //
kernel_map.out_maps.begin(), //
kernel_map.kernels.begin(), //
m_coordinate_size);
CUDA_CHECK(cudaStreamSynchronize(0));
THRUST_CHECK(kernel_map.decompose());
LOG_DEBUG("origin map decomposed");
return kernel_map;
}
namespace detail {
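// interpolation_kernel: one thread per (field point, cell corner) pair. The
// corner index is decoded bit by bit to choose the floor or ceil lattice point
// in each spatial dimension, and the multilinear weight is the product of
// (1 - |distance| / stride) over those dimensions.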
template <typename coordinate_type,
typename index_type, //
typename stride_type, //
typename float_type, //
typename map_type>
__global__ void
interpolation_kernel(map_type __restrict__ in_map, //
index_type const num_threads, //
float_type const *__restrict__ p_tfield, //
index_type *__restrict__ p_in_maps, //
index_type *__restrict__ p_out_maps, //
float_type *__restrict__ p_weights, //
stride_type const *__restrict__ p_tensor_stride, //
index_type const unused_map_value,
index_type const coordinate_size,
index_type const neighbor_volume) {
// coordinate_size * sizeof(index_type) + coordinate_size * sizeof(float_type)
// + THREADS * coordinate_size * sizeof(coordinate_type)
SharedMemory<float_type> shared;
float_type *sh_all = shared.getPointer();
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
float_type *sh_tfield = sh_all + tx * coordinate_size;
coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(
sh_all + CUDA_NUM_THREADS * coordinate_size);
coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
index_type *sh_tensor_stride = reinterpret_cast<index_type *>(
sh_coordinate + CUDA_NUM_THREADS * coordinate_size);
auto const equal = in_map.get_key_equal();
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
sh_tensor_stride[i] = p_tensor_stride[i];
}
if (x < num_threads) {
index_type const offset = coordinate_size * (x / neighbor_volume);
for (index_type i = 0; i < coordinate_size; ++i) {
sh_tfield[i] = p_tfield[offset + i];
}
}
__syncthreads();
if (x < num_threads) {
// iterate over values
uint32_t neighbor_ind = x % neighbor_volume;
// batch index
sh_tmp[0] = lrint(sh_tfield[0]);
uint32_t mask = 1;
for (uint32_t j = coordinate_size - 1; j > 0; --j) {
index_type curr_tensor_stride = sh_tensor_stride[j - 1];
if ((neighbor_ind & mask) == 0)
sh_tmp[j] =
floor(sh_tfield[j] / curr_tensor_stride) * curr_tensor_stride;
else
sh_tmp[j] =
floor(sh_tfield[j] / curr_tensor_stride) * curr_tensor_stride +
curr_tensor_stride;
mask = mask << 1;
}
auto const &in_result = in_map.find(coordinate<coordinate_type>(sh_tmp));
if (in_result != in_map.end()) {
p_in_maps[x] = (*in_result).second;
p_out_maps[x] = x / neighbor_volume;
// Compute weight
float_type weight = 1;
for (uint32_t j = 1; j < coordinate_size; ++j) {
weight *= 1 - abs(sh_tfield[j] - sh_tmp[j]) / sh_tensor_stride[j - 1];
}
p_weights[x] = weight;
} else {
p_in_maps[x] = unused_map_value;
}
}
}
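// field_map_kernel: like interpolation_kernel but without corner enumeration
// or weights; each continuous field coordinate is floored to the tensor stride
// and matched against the map, with misses tagged as unused_map_value.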
template <typename coordinate_type,
typename index_type, //
typename stride_type, //
typename float_type, //
typename map_type>
__global__ void
field_map_kernel(map_type __restrict__ in_map, //
index_type const num_threads, //
float_type const *__restrict__ p_tfield, //
index_type *__restrict__ p_in_maps, //
index_type *__restrict__ p_out_maps, //
stride_type const *__restrict__ p_tensor_stride, //
index_type const unused_map_value,
index_type const coordinate_size) {
// coordinate_size * sizeof(index_type) + coordinate_size * sizeof(float_type)
// + THREADS * coordinate_size * sizeof(coordinate_type)
SharedMemory<float_type> shared;
float_type *sh_all = shared.getPointer();
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
coordinate_type *sh_coordinate = reinterpret_cast<coordinate_type *>(sh_all);
coordinate_type *sh_tmp = sh_coordinate + tx * coordinate_size;
index_type *sh_tensor_stride = reinterpret_cast<index_type *>(
sh_coordinate + CUDA_NUM_THREADS * coordinate_size);
auto const equal = in_map.get_key_equal();
for (index_type i = tx; i < coordinate_size - 1; i += blockDim.x) {
sh_tensor_stride[i] = p_tensor_stride[i];
}
__syncthreads();
index_type const offset = coordinate_size * x;
if (x < num_threads) {
// iterate over values
float_type const *curr_tfield = p_tfield + offset;
// batch index
sh_tmp[0] = lrint(curr_tfield[0]);
for (uint32_t j = coordinate_size - 1; j > 0; --j) {
index_type curr_tensor_stride = sh_tensor_stride[j - 1];
sh_tmp[j] =
floor(curr_tfield[j] / curr_tensor_stride) * curr_tensor_stride;
}
auto const &in_result = in_map.find(coordinate<coordinate_type>(sh_tmp));
if (in_result != in_map.end()) {
p_in_maps[x] = (*in_result).second;
p_out_maps[x] = x;
} else {
p_in_maps[x] = unused_map_value;
}
}
}
// interpolation map inst
template <typename coordinate_type, typename index_type, typename size_type,
typename stride_type, typename field_type, typename map_type,
typename ByteAllocatorType>
std::vector<at::Tensor> interpolation_map_weight_tfield_type(
uint32_t const num_tfield, //
uint32_t const coordinate_size, //
index_type const unused_key, //
field_type const *const p_tfield, //
map_type &map, //
stride_type const *const p_tensor_stride, //
ByteAllocatorType const &byte_allocator,
c10::TensorOptions tfield_options) {
uint32_t const neighbor_volume = std::pow(2, (coordinate_size - 1));
size_type num_threads = neighbor_volume * num_tfield;
LOG_DEBUG("neighbor_volume:", neighbor_volume, "num_tfield:", num_tfield,
"num_threads:", num_threads);
index_type *d_in_map = reinterpret_cast<index_type *>(
byte_allocator.allocate(num_threads * sizeof(index_type)));
index_type *d_out_map = reinterpret_cast<index_type *>(
byte_allocator.allocate(num_threads * sizeof(index_type)));
field_type *d_weight = reinterpret_cast<field_type *>(
byte_allocator.allocate(num_threads * sizeof(field_type)));
size_type shared_memory_size_in_bytes =
coordinate_size * CUDA_NUM_THREADS * sizeof(field_type) +
coordinate_size * CUDA_NUM_THREADS * sizeof(coordinate_type) +
coordinate_size * sizeof(index_type);
LOG_DEBUG("Shared memory size:", shared_memory_size_in_bytes);
interpolation_kernel<coordinate_type, index_type, stride_type, field_type,
map_type>
<<<GET_BLOCKS(num_threads, CUDA_NUM_THREADS), CUDA_NUM_THREADS,
shared_memory_size_in_bytes>>>(map, //
num_threads, //
p_tfield, //
d_in_map, //
d_out_map, //
d_weight, //
p_tensor_stride, //
unused_key, //
coordinate_size, //
neighbor_volume);
// remove unused_keys
auto valid_begin =
thrust::make_zip_iterator(thrust::make_tuple(d_in_map, //
d_out_map, d_weight));
size_type const number_of_valid =
thrust::remove_if(thrust::device, //
valid_begin, //
thrust::make_zip_iterator(thrust::make_tuple(
d_in_map + num_threads, //
d_out_map + num_threads, d_weight + num_threads)),
detail::is_first<index_type>(unused_key)) -
valid_begin;
LOG_DEBUG("number_of_valid:", number_of_valid);
auto final_in_map =
torch::empty({number_of_valid},
tfield_options.dtype(torch::kInt32).requires_grad(false));
auto final_out_map =
torch::empty({number_of_valid},
tfield_options.dtype(torch::kInt32).requires_grad(false));
auto final_weights =
torch::empty({number_of_valid}, tfield_options.requires_grad(false));
if (number_of_valid > 0) {
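// Note: src and dst are both device pointers here; the stride_map path above
// uses cudaMemcpyDeviceToDevice for the analogous copies, which would state
// the intent more precisely than cudaMemcpyHostToDevice.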
CUDA_CHECK(cudaMemcpy(final_in_map.template data_ptr<int32_t>(), d_in_map,
number_of_valid * sizeof(int32_t),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(final_out_map.template data_ptr<int32_t>(), d_out_map,
number_of_valid * sizeof(int32_t),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(final_weights.template data_ptr<field_type>(),
d_weight, number_of_valid * sizeof(field_type),
cudaMemcpyHostToDevice));
}
byte_allocator.deallocate((char *)d_in_map, num_threads * sizeof(index_type));
byte_allocator.deallocate((char *)d_out_map,
num_threads * sizeof(index_type));
byte_allocator.deallocate((char *)d_weight, num_threads * sizeof(field_type));
return {final_in_map, final_out_map, final_weights};
}
// field map inst
template <typename coordinate_type, typename index_type, typename size_type,
typename stride_type, typename field_type, typename map_type,
typename ByteAllocatorType>
std::pair<at::Tensor, at::Tensor>
field_map_type(uint32_t const num_tfield, //
uint32_t const coordinate_size, //
index_type const unused_key, //
field_type const *const p_tfield, //
map_type &map, //
stride_type const *const p_tensor_stride, //
ByteAllocatorType const &byte_allocator) {
size_type num_threads = num_tfield;
LOG_DEBUG("num_threads:", num_threads);
index_type *d_in_map = reinterpret_cast<index_type *>(
byte_allocator.allocate(num_threads * sizeof(index_type)));
index_type *d_out_map = reinterpret_cast<index_type *>(
byte_allocator.allocate(num_threads * sizeof(index_type)));
size_type shared_memory_size_in_bytes =
coordinate_size * CUDA_NUM_THREADS * sizeof(coordinate_type) +
coordinate_size * sizeof(index_type);
LOG_DEBUG("Shared memory size:", shared_memory_size_in_bytes);
field_map_kernel<coordinate_type, index_type, stride_type, field_type,
map_type>
<<<GET_BLOCKS(num_threads, CUDA_NUM_THREADS), CUDA_NUM_THREADS,
shared_memory_size_in_bytes>>>(map, //
num_threads, //
p_tfield, //
d_in_map, //
d_out_map, //
p_tensor_stride, //
unused_key, //
coordinate_size);
// remove unused_keys
auto valid_begin =
thrust::make_zip_iterator(thrust::make_tuple(d_in_map, d_out_map));
size_type const number_of_valid =
thrust::remove_if(thrust::device, //
valid_begin, //
thrust::make_zip_iterator(
thrust::make_tuple(d_in_map + num_threads, //
d_out_map + num_threads)),
detail::is_first<index_type>(unused_key)) -
valid_begin;
LOG_DEBUG("number_of_valid:", number_of_valid);
auto curr_device = at::cuda::current_device();
auto tfield_options = torch::TensorOptions({at::kCUDA, curr_device})
.dtype(torch::kInt32)
.requires_grad(false);
auto final_in_map = torch::empty({number_of_valid}, tfield_options);
auto final_out_map = torch::empty({number_of_valid}, tfield_options);
if (number_of_valid > 0) {
CUDA_CHECK(cudaMemcpy(final_in_map.template data_ptr<int32_t>(), d_in_map,
number_of_valid * sizeof(int32_t),
cudaMemcpyHostToDevice));
CUDA_CHECK(cudaMemcpy(final_out_map.template data_ptr<int32_t>(), d_out_map,
number_of_valid * sizeof(int32_t),
cudaMemcpyHostToDevice));
}
byte_allocator.deallocate((char *)d_in_map, num_threads * sizeof(index_type));
byte_allocator.deallocate((char *)d_out_map,
num_threads * sizeof(index_type));
return {final_in_map, final_out_map};
}
} // namespace detail
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
std::vector<at::Tensor>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::interpolation_map_weight(
at::Tensor const &tfield) const {
// Overestimate the reserve size to be size();
ASSERT(tfield.dim() == 2, "Invalid tfield dimension");
ASSERT(tfield.size(1) == m_coordinate_size, "Invalid tfield size");
size_type const num_tfield = tfield.size(0);
uint32_t const neighbor_volume = std::pow(2, (m_coordinate_size - 1));
index_type const unused_key = std::numeric_limits<index_type>::max();
LOG_DEBUG("map size", m_size);
switch (tfield.scalar_type()) {
case at::ScalarType::Double:
return detail::interpolation_map_weight_tfield_type<
coordinate_type, index_type, size_type, index_type, double, map_type,
TemplatedAllocator<char>>(num_tfield, //
m_coordinate_size, //
unused_key, //
tfield.template data_ptr<double>(), //
*m_map, //
m_device_tensor_stride.cbegin(), //
m_byte_allocator, //
tfield.options());
case at::ScalarType::Float:
return detail::interpolation_map_weight_tfield_type<
coordinate_type, index_type, size_type, index_type, float, map_type,
TemplatedAllocator<char>>(num_tfield, //
m_coordinate_size, //
unused_key, //
tfield.template data_ptr<float>(), //
*m_map, //
m_device_tensor_stride.cbegin(), //
m_byte_allocator, //
tfield.options());
default:
ASSERT(false, "Unsupported float type");
}
}
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
template <typename coordinate_field_type>
std::pair<at::Tensor, at::Tensor>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::field_map(
coordinate_field_type const *p_tfield, size_type const num_tfield) const {
index_type const unused_key = std::numeric_limits<index_type>::max();
LOG_DEBUG("map size", m_size);
return detail::field_map_type<coordinate_type, index_type, size_type,
index_type, coordinate_field_type, map_type,
TemplatedAllocator<char>>(
num_tfield, //
m_coordinate_size, //
unused_key, //
p_tfield, //
*m_map, //
m_device_tensor_stride.cbegin(), //
m_byte_allocator);
}
/**
* Union map
*/
namespace detail {
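// union_map_kernel: one thread per valid entry of in_map; the same key is
// looked up in the union map and the (in row, union row) pair is stored.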
template <typename coordinate_type, //
typename size_type, //
typename index_type, //
typename tensor_type, //
typename map_type>
__global__ void
union_map_kernel(size_type const num_threads, //
map_type const __restrict__ in_map, //
map_type const __restrict__ union_map, //
index_type const *const __restrict__ in_valid_map_index, //
tensor_type *__restrict__ p_in_maps, //
tensor_type *__restrict__ p_union_maps,
size_type const coordinate_size) {
auto const tx = threadIdx.x;
auto const bx = blockIdx.x;
auto const x = blockDim.x * bx + tx;
if (x < num_threads) {
typename map_type::value_type const &in_value =
in_map.data()[in_valid_map_index[x]];
auto union_iter = union_map.find(in_value.first);
p_in_maps[x] = in_value.second;
p_union_maps[x] = union_iter->second;
}
}
} // namespace detail
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
std::vector<at::Tensor>
CoordinateMapGPU<coordinate_type, TemplatedAllocator>::union_map(
std::vector<std::reference_wrapper<self_type>> const &in_maps,
uint32_t thread_dim) const {
auto options = torch::TensorOptions({at::kCUDA, at::cuda::current_device()})
.dtype(torch::kInt64)
.requires_grad(false);
std::vector<at::Tensor> union_maps;
for (self_type const &in_map : in_maps) {
size_type const num_threads = in_map.m_valid_map_index.size();
auto const num_blocks = GET_BLOCKS(num_threads, thread_dim);
at::Tensor curr_map = torch::empty({2, num_threads}, options);
LOG_DEBUG("in_map size", num_threads, ", num block", num_blocks,
", threads dim", thread_dim);
int64_t *d_in_map = curr_map.template data_ptr<int64_t>();
detail::union_map_kernel<coordinate_type, size_type, index_type, int64_t,
map_type>
<<<num_blocks, thread_dim>>>(num_threads, //
*in_map.m_map, //
*m_map, //
in_map.m_valid_map_index.cbegin(), //
d_in_map, //
d_in_map + num_threads, //
m_coordinate_size);
CUDA_CHECK(cudaStreamSynchronize(0));
union_maps.push_back(std::move(curr_map));
}
return union_maps;
}
// Helper functions
template <typename coordinate_type,
template <typename T> class TemplatedAllocator>
void CoordinateMapGPU<coordinate_type, TemplatedAllocator>::copy_coordinates(
coordinate_type *dst_coordinate) const {
size_type const num_threads = size();
if (num_threads <= 0)
return;
// Copy by offset
// size_type const num_blocks = GET_BLOCKS(num_threads, CUDA_NUM_THREADS);
// detail::copy_coordinates_by_offset<coordinate_type, size_type, index_type,
// map_type>
// <<<num_blocks, CUDA_NUM_THREADS>>>(
// *m_map, //
// dst_coordinate, //
// m_valid_map_index.data(), //
// num_threads, //
// m_coordinate_size);
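// Copy by valid row: one thread per coordinate element, gathering the rows
// listed in m_valid_row_index straight from the contiguous coordinate storage.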
size_type const num_blocks =
GET_BLOCKS(num_threads * m_coordinate_size, CUDA_NUM_THREADS);
detail::copy_coordinates_by_valid_row<coordinate_type, size_type, index_type,
map_type>
<<<num_blocks, CUDA_NUM_THREADS>>>(
// *m_map, //
const_coordinate_data(), //
dst_coordinate, //
m_valid_row_index.cbegin(), //
num_threads * m_coordinate_size, //
m_coordinate_size);
}
// Template instantiation
template class CoordinateFieldMapGPU<default_types::ccoordinate_type,
default_types::dcoordinate_type,
detail::default_allocator>;
template class CoordinateFieldMapGPU<default_types::ccoordinate_type,
default_types::dcoordinate_type,
detail::c10_allocator>;
template class CoordinateMapGPU<default_types::dcoordinate_type,
detail::default_allocator>;
template class CoordinateMapGPU<default_types::dcoordinate_type,
detail::c10_allocator>;
template std::pair<
gpu_storage<default_types::index_type, detail::default_allocator<char>>,
gpu_storage<default_types::index_type, detail::default_allocator<char>>>
CoordinateMapGPU<default_types::dcoordinate_type, detail::default_allocator>::
insert_and_map<true>(
coordinate_iterator<default_types::dcoordinate_type> key_first,
coordinate_iterator<default_types::dcoordinate_type> key_last);
template std::pair<
gpu_storage<default_types::index_type, detail::default_allocator<char>>,
gpu_storage<default_types::index_type, detail::default_allocator<char>>>
CoordinateMapGPU<default_types::dcoordinate_type, detail::default_allocator>::
insert_and_map<false>(
coordinate_iterator<default_types::dcoordinate_type> key_first,
coordinate_iterator<default_types::dcoordinate_type> key_last);
template std::pair<
gpu_storage<default_types::index_type, detail::c10_allocator<char>>,
gpu_storage<default_types::index_type, detail::c10_allocator<char>>>
CoordinateMapGPU<default_types::dcoordinate_type, detail::c10_allocator>::
insert_and_map<true>(
coordinate_iterator<default_types::dcoordinate_type> key_first,
coordinate_iterator<default_types::dcoordinate_type> key_last);
template std::pair<
gpu_storage<default_types::index_type, detail::c10_allocator<char>>,
gpu_storage<default_types::index_type, detail::c10_allocator<char>>>
CoordinateMapGPU<default_types::dcoordinate_type, detail::c10_allocator>::
insert_and_map<false>(
coordinate_iterator<default_types::dcoordinate_type> key_first,
coordinate_iterator<default_types::dcoordinate_type> key_last);
template std::pair<at::Tensor, at::Tensor>
CoordinateMapGPU<default_types::dcoordinate_type, detail::default_allocator>::
field_map<float>(float const *p_tfield,
default_types::size_type const num_tfield) const;
template std::pair<at::Tensor, at::Tensor>
CoordinateMapGPU<default_types::dcoordinate_type, detail::c10_allocator>::
field_map<float>(float const *p_tfield,
default_types::size_type const num_tfield) const;
} // namespace minkowski
|
fefe7b75a201f8c1ee2f205d6a67906e46fc4746.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
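// Micro-benchmark kernel: despite the saxpy name it only issues eight strided
// shared-memory reads per iteration and sums them so the loads are not
// optimized away; the stream-count and address parameters are unused placeholders.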
__global__ void saxpy(unsigned num_rd_streams, unsigned addr1, unsigned addr2, unsigned addr3, unsigned addr4, unsigned addr5, unsigned addr6, unsigned addr7, unsigned addr8, unsigned num_wr_streams, int dummy, float *x)
{
__shared__ float A[1000];
int id = blockIdx.x*blockDim.x + threadIdx.x;
float a = 0, b = 0, c = 0, d = 0, e = 0, f = 0, g = 0, h = 0;
for (int i = 0; i < 1000 - 8; i += 8) {
a = A[id + 8*i*dummy];
b = A[id + 1*i*dummy];
c = A[id + 2*i*dummy];
d = A[id + 3*i*dummy];
e = A[id + 4*i*dummy];
f = A[id + 5*i*dummy];
g = A[id + 6*i*dummy];
h = A[id + 7*i*dummy];
}
x[id] = a + b + c + d + e + f + g + h;
}
int main(int argc, char *argv[])
{
int N = 1000;
// Run the kernel on N (= 1000) elements.
float *h_x = (float *)malloc(N*sizeof(float));
float *d_x = (float *)100;
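// NOTE: d_x is a hard-coded raw address and its hipMalloc is commented out
// below, so the later hipMemcpy into it cannot succeed on real hardware; this
// looks intentional for the experiment, but treat that reading as an assumption.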
float *d_x_copy;
hipMalloc(&d_x_copy, N*sizeof(float));
// hipMalloc(&d_x, 2*sizeof(float));
for (int i = 1 ; i <= N ; i++)
h_x[i-1] = (float)i;
hipMemcpy(d_x, h_x, N*sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( saxpy), dim3(1), dim3(8), 0, 0, 8, 100, 100, 100, 100, 100, 100, 100, 100, 0, atoi(argv[1]), d_x);
hipMemcpy(h_x, d_x, sizeof(float), hipMemcpyDeviceToHost);
printf("%f\n", *h_x);
}
| fefe7b75a201f8c1ee2f205d6a67906e46fc4746.cu | #include <stdio.h>
#include <stdlib.h>
#include <unistd.h>
__global__ void saxpy(unsigned num_rd_streams, unsigned addr1, unsigned addr2, unsigned addr3, unsigned addr4, unsigned addr5, unsigned addr6, unsigned addr7, unsigned addr8, unsigned num_wr_streams, int dummy, float *x)
{
__shared__ float A[1000];
int id = blockIdx.x*blockDim.x + threadIdx.x;
float a = 0, b = 0, c = 0, d = 0, e = 0, f = 0, g = 0, h = 0;
for (int i = 0; i < 1000 - 8; i += 8) {
a = A[id + 8*i*dummy];
b = A[id + 1*i*dummy];
c = A[id + 2*i*dummy];
d = A[id + 3*i*dummy];
e = A[id + 4*i*dummy];
f = A[id + 5*i*dummy];
g = A[id + 6*i*dummy];
h = A[id + 7*i*dummy];
}
x[id] = a + b + c + d + e + f + g + h;
}
int main(int argc, char *argv[])
{
int N = 1000;
// Run the kernel on N (= 1000) elements.
float *h_x = (float *)malloc(N*sizeof(float));
float *d_x = (float *)100;
float *d_x_copy;
cudaMalloc(&d_x_copy, N*sizeof(float));
// cudaMalloc(&d_x, 2*sizeof(float));
for (int i = 1 ; i <= N ; i++)
h_x[i-1] = (float)i;
cudaMemcpy(d_x, h_x, N*sizeof(float), cudaMemcpyHostToDevice);
saxpy<<<1, 8>>>(8, 100, 100, 100, 100, 100, 100, 100, 100, 0, atoi(argv[1]), d_x);
cudaMemcpy(h_x, d_x, sizeof(float), cudaMemcpyDeviceToHost);
printf("%f\n", *h_x);
}
|
5862cbcee4346dfb6b433250db9ca9bcbbafcc61.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
Recursive Gaussian filter
*/
#ifndef _GAUSSIAN_KERNEL_H_
#define _GAUSSIAN_KERNEL_H_
#include "cutil_math.h"
texture<uchar4, 2, hipReadModeNormalizedFloat> tex;
#define BLOCK_DIM 16
#define CLAMP_TO_EDGE 1
// Transpose kernel (see transpose SDK sample for details)
extern "C"
__global__ void d_transpose(uint *odata, uint *idata, int width, int height)
{
__shared__ uint block[BLOCK_DIM][BLOCK_DIM+1];
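// The +1 padding in the second dimension staggers rows across shared-memory
// banks so the transposed (column-wise) reads below avoid bank conflicts.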
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
// RGBA version
// reads from 32-bit uint array holding 8-bit RGBA
// convert floating point rgba color to 32-bit integer
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255);
}
// convert from 32-bit int to float4
__device__ float4 rgbaIntToFloat(uint c)
{
float4 rgba;
rgba.x = (c & 0xff) / 255.0f;
rgba.y = ((c>>8) & 0xff) / 255.0f;
rgba.z = ((c>>16) & 0xff) / 255.0f;
rgba.w = ((c>>24) & 0xff) / 255.0f;
return rgba;
}
// simple 1st order recursive filter
// processes one column per thread
extern "C"
__global__ void
d_simpleRecursive_rgba(uint *id, uint *od, int w, int h, float a)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
id += x; // advance pointers to correct column
od += x;
// forward pass
float4 yp = rgbaIntToFloat(*id); // previous output
for (int y = 0; y < h; y++) {
float4 xc = rgbaIntToFloat(*id);
float4 yc = xc + a*(yp - xc); // simple lerp between current and previous value
*od = rgbaFloatToInt(yc);
id += w; od += w; // move to next row
yp = yc;
}
// reset pointers to point to last element in column
id -= w;
od -= w;
// reverse pass
// ensures response is symmetrical
yp = rgbaIntToFloat(*id);
for (int y = h-1; y >= 0; y--) {
float4 xc = rgbaIntToFloat(*id);
float4 yc = xc + a*(yp - xc);
*od = rgbaFloatToInt((rgbaIntToFloat(*od) + yc)*0.5f);
id -= w; od -= w; // move to previous row
yp = yc;
}
}
// recursive Gaussian filter
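// Two-pass recursive (IIR) approximation of a Gaussian: a causal top-to-bottom
// pass followed by an anti-causal bottom-to-top pass whose results are summed,
// giving a symmetric response at a cost independent of the filter width.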
extern "C"
__global__ void
d_recursiveGaussian_rgba(uint *id, uint *od, int w, int h, float a0, float a1, float a2, float a3, float b1, float b2, float coefp, float coefn)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
id += x; // advance pointers to correct column
od += x;
// forward pass
float4 xp = make_float4(0.0f); // previous input
float4 yp = make_float4(0.0f); // previous output
float4 yb = make_float4(0.0f); // previous output by 2
#if CLAMP_TO_EDGE
xp = rgbaIntToFloat(*id); yb = coefp*xp; yp = yb;
#endif
for (int y = 0; y < h; y++) {
float4 xc = rgbaIntToFloat(*id);
float4 yc = a0*xc + a1*xp - b1*yp - b2*yb;
*od = rgbaFloatToInt(yc);
id += w; od += w; // move to next row
xp = xc; yb = yp; yp = yc;
}
// reset pointers to point to last element in column
id -= w;
od -= w;
// reverse pass
// ensures response is symmetrical
float4 xn = make_float4(0.0f);
float4 xa = make_float4(0.0f);
float4 yn = make_float4(0.0f);
float4 ya = make_float4(0.0f);
#if CLAMP_TO_EDGE
xn = xa = rgbaIntToFloat(*id); yn = coefn*xn; ya = yn;
#endif
for (int y = h-1; y >= 0; y--) {
float4 xc = rgbaIntToFloat(*id);
float4 yc = a2*xn + a3*xa - b1*yn - b2*ya;
xa = xn; xn = xc; ya = yn; yn = yc;
*od = rgbaFloatToInt(rgbaIntToFloat(*od) + yc);
id -= w; od -= w; // move to previous row
}
}
#endif // #ifndef _GAUSSIAN_KERNEL_H_
| 5862cbcee4346dfb6b433250db9ca9bcbbafcc61.cu | /*
Recursive Gaussian filter
*/
#ifndef _GAUSSIAN_KERNEL_H_
#define _GAUSSIAN_KERNEL_H_
#include "cutil_math.h"
texture<uchar4, 2, cudaReadModeNormalizedFloat> tex;
#define BLOCK_DIM 16
#define CLAMP_TO_EDGE 1
// Transpose kernel (see transpose SDK sample for details)
extern "C"
__global__ void d_transpose(uint *odata, uint *idata, int width, int height)
{
__shared__ uint block[BLOCK_DIM][BLOCK_DIM+1];
// read the matrix tile into shared memory
unsigned int xIndex = blockIdx.x * BLOCK_DIM + threadIdx.x;
unsigned int yIndex = blockIdx.y * BLOCK_DIM + threadIdx.y;
if((xIndex < width) && (yIndex < height))
{
unsigned int index_in = yIndex * width + xIndex;
block[threadIdx.y][threadIdx.x] = idata[index_in];
}
__syncthreads();
// write the transposed matrix tile to global memory
xIndex = blockIdx.y * BLOCK_DIM + threadIdx.x;
yIndex = blockIdx.x * BLOCK_DIM + threadIdx.y;
if((xIndex < height) && (yIndex < width))
{
unsigned int index_out = yIndex * height + xIndex;
odata[index_out] = block[threadIdx.x][threadIdx.y];
}
}
// RGBA version
// reads from 32-bit uint array holding 8-bit RGBA
// convert floating point rgba color to 32-bit integer
__device__ uint rgbaFloatToInt(float4 rgba)
{
rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0]
rgba.y = __saturatef(rgba.y);
rgba.z = __saturatef(rgba.z);
rgba.w = __saturatef(rgba.w);
return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255);
}
// convert from 32-bit int to float4
__device__ float4 rgbaIntToFloat(uint c)
{
float4 rgba;
rgba.x = (c & 0xff) / 255.0f;
rgba.y = ((c>>8) & 0xff) / 255.0f;
rgba.z = ((c>>16) & 0xff) / 255.0f;
rgba.w = ((c>>24) & 0xff) / 255.0f;
return rgba;
}
// simple 1st order recursive filter
// processes one column per thread
extern "C"
__global__ void
d_simpleRecursive_rgba(uint *id, uint *od, int w, int h, float a)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
id += x; // advance pointers to correct column
od += x;
// forward pass
float4 yp = rgbaIntToFloat(*id); // previous output
for (int y = 0; y < h; y++) {
float4 xc = rgbaIntToFloat(*id);
float4 yc = xc + a*(yp - xc); // simple lerp between current and previous value
*od = rgbaFloatToInt(yc);
id += w; od += w; // move to next row
yp = yc;
}
// reset pointers to point to last element in column
id -= w;
od -= w;
// reverse pass
// ensures response is symmetrical
yp = rgbaIntToFloat(*id);
for (int y = h-1; y >= 0; y--) {
float4 xc = rgbaIntToFloat(*id);
float4 yc = xc + a*(yp - xc);
*od = rgbaFloatToInt((rgbaIntToFloat(*od) + yc)*0.5f);
id -= w; od -= w; // move to previous row
yp = yc;
}
}
// recursive Gaussian filter
extern "C"
__global__ void
d_recursiveGaussian_rgba(uint *id, uint *od, int w, int h, float a0, float a1, float a2, float a3, float b1, float b2, float coefp, float coefn)
{
unsigned int x = blockIdx.x*blockDim.x + threadIdx.x;
id += x; // advance pointers to correct column
od += x;
// forward pass
float4 xp = make_float4(0.0f); // previous input
float4 yp = make_float4(0.0f); // previous output
float4 yb = make_float4(0.0f); // previous output by 2
#if CLAMP_TO_EDGE
xp = rgbaIntToFloat(*id); yb = coefp*xp; yp = yb;
#endif
for (int y = 0; y < h; y++) {
float4 xc = rgbaIntToFloat(*id);
float4 yc = a0*xc + a1*xp - b1*yp - b2*yb;
*od = rgbaFloatToInt(yc);
id += w; od += w; // move to next row
xp = xc; yb = yp; yp = yc;
}
// reset pointers to point to last element in column
id -= w;
od -= w;
// reverse pass
// ensures response is symmetrical
float4 xn = make_float4(0.0f);
float4 xa = make_float4(0.0f);
float4 yn = make_float4(0.0f);
float4 ya = make_float4(0.0f);
#if CLAMP_TO_EDGE
xn = xa = rgbaIntToFloat(*id); yn = coefn*xn; ya = yn;
#endif
for (int y = h-1; y >= 0; y--) {
float4 xc = rgbaIntToFloat(*id);
float4 yc = a2*xn + a3*xa - b1*yn - b2*ya;
xa = xn; xn = xc; ya = yn; yn = yc;
*od = rgbaFloatToInt(rgbaIntToFloat(*od) + yc);
id -= w; od -= w; // move to previous row
}
}
#endif // #ifndef _GAUSSIAN_KERNEL_H_
|
d61b3f14a17516acfd7dd7dc7e354dd58e0c0d5d.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
#define CUDA_NUM_THREADS 1024
inline int GET_BLOCKS(int n)
{
return (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
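// The ${...} placeholders (Dtype, nthreads, tensor shapes, stride, padding,
// dilation) are assumed to be substituted by a host-side templating step before
// compilation; each thread then computes one output element.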
extern "C" __global__ void conv2d_naive_forward_kernel(
const ${Dtype} *const bottom_data, const ${Dtype} *const weight_data, ${Dtype} *const top_data)
{
CUDA_KERNEL_LOOP(index, ${nthreads})
{
// 4D coordinates
const int oN = index / ${top_width} / ${top_height} / ${out_channels};
const int oC = index / ${top_width} / ${top_height} % ${out_channels};
const int oH = index / ${top_width} % ${top_height};
const int oW = index % ${top_width};
// weight & image offset
const ${Dtype} *weight = weight_data + oC * ${in_channels} * ${kernel_h} * ${kernel_w};
const int image_offset0 = oN * ${in_channels} * ${bottom_height} * ${bottom_width};
${Dtype} value = 0;
// main loop
for (int iicc = 0; iicc < ${in_channels}; iicc++)
{
const int image_offset1 = image_offset0 + iicc * ${bottom_height} * ${bottom_width};
#pragma unroll
for (int kkhh = 0; kkhh < ${kernel_h}; kkhh++)
{
#pragma unroll
for (int kkww = 0; kkww < ${kernel_w}; kkww++)
{
const int h_in = -${pad_h} + oH * ${stride_h} + kkhh * ${dilation_h};
const int w_in = -${pad_w} + oW * ${stride_w} + kkww * ${dilation_w};
if ((h_in >= 0) && (h_in < ${bottom_height}) && (w_in >= 0) && (w_in < ${bottom_width}))
{
const int offset = image_offset1 + h_in * ${bottom_width} + w_in;
value += (*weight) * bottom_data[offset];
}
weight++;
}
}
}
top_data[index] = value;
}
}
| d61b3f14a17516acfd7dd7dc7e354dd58e0c0d5d.cu | #define CUDA_KERNEL_LOOP(i, n) \
for (int i = blockDim.x * blockIdx.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)
#define CUDA_NUM_THREADS 1024
inline int GET_BLOCKS(int n)
{
return (n + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS;
}
extern "C" __global__ void conv2d_naive_forward_kernel(
const ${Dtype} *const bottom_data, const ${Dtype} *const weight_data, ${Dtype} *const top_data)
{
CUDA_KERNEL_LOOP(index, ${nthreads})
{
// 4D coordinates
const int oN = index / ${top_width} / ${top_height} / ${out_channels};
const int oC = index / ${top_width} / ${top_height} % ${out_channels};
const int oH = index / ${top_width} % ${top_height};
const int oW = index % ${top_width};
// weight & image offset
const ${Dtype} *weight = weight_data + oC * ${in_channels} * ${kernel_h} * ${kernel_w};
const int image_offset0 = oN * ${in_channels} * ${bottom_height} * ${bottom_width};
${Dtype} value = 0;
// main loop
for (int iicc = 0; iicc < ${in_channels}; iicc++)
{
const int image_offset1 = image_offset0 + iicc * ${bottom_height} * ${bottom_width};
#pragma unroll
for (int kkhh = 0; kkhh < ${kernel_h}; kkhh++)
{
#pragma unroll
for (int kkww = 0; kkww < ${kernel_w}; kkww++)
{
const int h_in = -${pad_h} + oH * ${stride_h} + kkhh * ${dilation_h};
const int w_in = -${pad_w} + oW * ${stride_w} + kkww * ${dilation_w};
if ((h_in >= 0) && (h_in < ${bottom_height}) && (w_in >= 0) && (w_in < ${bottom_width}))
{
const int offset = image_offset1 + h_in * ${bottom_width} + w_in;
value += (*weight) * bottom_data[offset];
}
weight++;
}
}
}
top_data[index] = value;
}
}
|
93cc4d07cd667b518dcf13459af25ace2725c861.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <time.h>
void naiveMultiply(float *a, float *b, float *c, int M, int N, int w)
{
for (int row = 0; row < M; ++row)
for (int col = 0; col < N; ++col)
{
float sum = 0.0f;
for (int i = 0; i < w; ++i)
{
sum += a[row*w+i] * b[i*N+col];
}
c[row*N+col] = sum;
}
}
template <int TILE_DIM> __global__ void simpleMultiply(float *a, float *b, float *c, int N)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.0f;
for (int i = 0; i < TILE_DIM; ++i)
{
sum += a[row*TILE_DIM+i] * b[i*N+col];
}
c[row*N+col] = sum;
}
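// coalescedMultiply caches one tile of A in shared memory so each element of A
// is loaded from global memory once per block instead of once per output column.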
template <int TILE_DIM> __global__ void coalescedMultiply(float *a, float *b, float *c, int N)
{
__shared__ float aTile[TILE_DIM][TILE_DIM];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.0f;
aTile[threadIdx.y][threadIdx.x] = a[row*TILE_DIM+threadIdx.x];
for (int i = 0; i < TILE_DIM; i++)
{
sum += aTile[threadIdx.y][i]* b[i*N+col];
}
c[row*N+col] = sum;
}
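// sharedABMultiply also stages a tile of B in shared memory, with a
// __syncthreads() so every element is written before any thread reads it.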
template <int TILE_DIM> __global__ void sharedABMultiply(float *a, float *b, float *c, int N)
{
__shared__ float aTile[TILE_DIM][TILE_DIM];
__shared__ float bTile[TILE_DIM][TILE_DIM];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.0f;
aTile[threadIdx.y][threadIdx.x] = a[row*TILE_DIM+threadIdx.x];
bTile[threadIdx.y][threadIdx.x] = b[threadIdx.y*N+col];
__syncthreads();
for (int i = 0; i < TILE_DIM; ++i)
{
sum += aTile[threadIdx.y][i]* bTile[i][threadIdx.x];
}
c[row*N+col] = sum;
}
template <int TILE_DIM> __global__ void sharedABUnrolledMultiply(float *a, float *b, float *c, int N)
{
__shared__ float aTile[TILE_DIM][TILE_DIM];
__shared__ float bTile[TILE_DIM][TILE_DIM];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.0f;
aTile[threadIdx.y][threadIdx.x] = a[row*TILE_DIM+threadIdx.x];
bTile[threadIdx.y][threadIdx.x] = b[threadIdx.y*N+col];
__syncthreads();
// Unrolling is possible provided that TILE_DIM is known at compile time.
#pragma unroll
for (int i = 0; i < TILE_DIM; ++i)
{
sum += aTile[threadIdx.y][i]* bTile[i][threadIdx.x];
}
c[row*N+col] = sum;
}
int main(int argc, char *argv[])
{
// Initialize constants.
const int w = 32;
const int M = w * 59;
const int N = w * 53;
size_t numElementsA = M * w;
size_t numElementsB = w * N;
size_t numElementsC = M * N;
size_t numBytesA = sizeof(float) * numElementsA;
size_t numBytesB = sizeof(float) * numElementsB;
size_t numBytesC = sizeof(float) * numElementsC;
// Allocate matrices a, b and c in host memory.
float *h_a = (float *)malloc(numBytesA);
float *h_b = (float *)malloc(numBytesB);
float *h_c = (float *)malloc(numBytesC);
float *h_r = (float *)malloc(numBytesC);
// Initialize matrices a and b.
srand(time(0));
for (int i = 0; i < numElementsA; ++i)
{
h_a[i] = rand() / (float)RAND_MAX;
}
for (int i = 0; i < numElementsB; ++i)
{
h_b[i] = rand() / (float)RAND_MAX;
}
// Compute a reference answer in host.
naiveMultiply(h_a, h_b, h_r, M, N, w);
// Allocate matrices a, b and c in device memory.
float *d_a, *d_b, *d_c;
hipMalloc((void **)&d_a, numBytesA);
hipMalloc((void **)&d_b, numBytesB);
hipMalloc((void **)&d_c, numBytesC);
// Copy matrices a and b from host memory to device memory.
hipMemcpy(d_a, h_a, numBytesA, hipMemcpyHostToDevice);
hipMemcpy(d_b, h_b, numBytesB, hipMemcpyHostToDevice);
// Warm up the device.
dim3 numThreadsPerBlock(w, w);
dim3 numBlocksPerGrid(N / numThreadsPerBlock.x, M / numThreadsPerBlock.y);
hipLaunchKernelGGL(( simpleMultiply<w>), dim3(numBlocksPerGrid), dim3(numThreadsPerBlock), 0, 0, d_a, d_b, d_c, N);
hipDeviceSynchronize();
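// Only simpleMultiply is timed below; substituting coalescedMultiply,
// sharedABMultiply, or sharedABUnrolledMultiply here compares the shared-memory
// variants under the same harness.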
// Create events to record timing data.
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
// Record an event before kernel invocations.
hipEventRecord(start);
// Invoke the kernel for a number of iterations.
int numIterations = 300;
for (int i = 0; i < numIterations; ++i)
{
hipLaunchKernelGGL(( simpleMultiply<w>), dim3(numBlocksPerGrid), dim3(numThreadsPerBlock), 0, 0, d_a, d_b, d_c, N);
}
// Record an event after kernel invocations.
hipEventRecord(stop);
// Wait for the event to complete.
hipEventSynchronize(stop);
// Compute the elapsed time between two events.
float elapsed;
hipEventElapsedTime(&elapsed, start, stop);
// Compute and print the GFLOP/s performance metric.
printf("%.2f GFLOP/s\n", (2.0f * M * N * w * numIterations * 1e-9f) / (elapsed / 1000.0f));
// Copy matrix c from device memory to host memory synchronously.
hipMemcpy(h_c, d_c, numBytesC, hipMemcpyDeviceToHost);
// Validate the result.
for (int i = 0; i < numElementsC; ++i)
{
float actual = h_c[i];
float expected = h_r[i];
if (fabs(actual - expected) / w > 1e-6)
{
printf("h_c[%d] = %f, expected = %f\n", i, actual, expected);
break;
}
}
// Cleanup.
hipFree(d_c);
hipFree(d_b);
hipFree(d_a);
hipDeviceReset();
free(h_r);
free(h_c);
free(h_b);
free(h_a);
}
| 93cc4d07cd667b518dcf13459af25ace2725c861.cu | #include <stdio.h>
#include <time.h>
void naiveMultiply(float *a, float *b, float *c, int M, int N, int w)
{
for (int row = 0; row < M; ++row)
for (int col = 0; col < N; ++col)
{
float sum = 0.0f;
for (int i = 0; i < w; ++i)
{
sum += a[row*w+i] * b[i*N+col];
}
c[row*N+col] = sum;
}
}
template <int TILE_DIM> __global__ void simpleMultiply(float *a, float *b, float *c, int N)
{
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.0f;
for (int i = 0; i < TILE_DIM; ++i)
{
sum += a[row*TILE_DIM+i] * b[i*N+col];
}
c[row*N+col] = sum;
}
template <int TILE_DIM> __global__ void coalescedMultiply(float *a, float *b, float *c, int N)
{
__shared__ float aTile[TILE_DIM][TILE_DIM];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.0f;
aTile[threadIdx.y][threadIdx.x] = a[row*TILE_DIM+threadIdx.x];
for (int i = 0; i < TILE_DIM; i++)
{
sum += aTile[threadIdx.y][i]* b[i*N+col];
}
c[row*N+col] = sum;
}
template <int TILE_DIM> __global__ void sharedABMultiply(float *a, float *b, float *c, int N)
{
__shared__ float aTile[TILE_DIM][TILE_DIM];
__shared__ float bTile[TILE_DIM][TILE_DIM];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.0f;
aTile[threadIdx.y][threadIdx.x] = a[row*TILE_DIM+threadIdx.x];
bTile[threadIdx.y][threadIdx.x] = b[threadIdx.y*N+col];
__syncthreads();
for (int i = 0; i < TILE_DIM; ++i)
{
sum += aTile[threadIdx.y][i]* bTile[i][threadIdx.x];
}
c[row*N+col] = sum;
}
template <int TILE_DIM> __global__ void sharedABUnrolledMultiply(float *a, float *b, float *c, int N)
{
__shared__ float aTile[TILE_DIM][TILE_DIM];
__shared__ float bTile[TILE_DIM][TILE_DIM];
int row = blockIdx.y * blockDim.y + threadIdx.y;
int col = blockIdx.x * blockDim.x + threadIdx.x;
float sum = 0.0f;
aTile[threadIdx.y][threadIdx.x] = a[row*TILE_DIM+threadIdx.x];
bTile[threadIdx.y][threadIdx.x] = b[threadIdx.y*N+col];
__syncthreads();
// Unrolling is possible provided that TILE_DIM is known at compile time.
#pragma unroll
for (int i = 0; i < TILE_DIM; ++i)
{
sum += aTile[threadIdx.y][i]* bTile[i][threadIdx.x];
}
c[row*N+col] = sum;
}
int main(int argc, char *argv[])
{
// Initialize constants.
const int w = 32;
const int M = w * 59;
const int N = w * 53;
size_t numElementsA = M * w;
size_t numElementsB = w * N;
size_t numElementsC = M * N;
size_t numBytesA = sizeof(float) * numElementsA;
size_t numBytesB = sizeof(float) * numElementsB;
size_t numBytesC = sizeof(float) * numElementsC;
// Allocate matrices a, b and c in host memory.
float *h_a = (float *)malloc(numBytesA);
float *h_b = (float *)malloc(numBytesB);
float *h_c = (float *)malloc(numBytesC);
float *h_r = (float *)malloc(numBytesC);
// Initialize matrices a and b.
srand(time(0));
for (int i = 0; i < numElementsA; ++i)
{
h_a[i] = rand() / (float)RAND_MAX;
}
for (int i = 0; i < numElementsB; ++i)
{
h_b[i] = rand() / (float)RAND_MAX;
}
// Compute a reference answer in host.
naiveMultiply(h_a, h_b, h_r, M, N, w);
// Allocate matrices a, b and c in device memory.
float *d_a, *d_b, *d_c;
cudaMalloc((void **)&d_a, numBytesA);
cudaMalloc((void **)&d_b, numBytesB);
cudaMalloc((void **)&d_c, numBytesC);
// Copy matrices a and b from host memory to device memory.
cudaMemcpy(d_a, h_a, numBytesA, cudaMemcpyHostToDevice);
cudaMemcpy(d_b, h_b, numBytesB, cudaMemcpyHostToDevice);
// Warm up the device.
dim3 numThreadsPerBlock(w, w);
dim3 numBlocksPerGrid(N / numThreadsPerBlock.x, M / numThreadsPerBlock.y);
simpleMultiply<w><<<numBlocksPerGrid, numThreadsPerBlock>>>(d_a, d_b, d_c, N);
cudaDeviceSynchronize();
// Create events to record timing data.
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
// Record an event before kernel invocations.
cudaEventRecord(start);
// Invoke the kernel for a number of iterations.
int numIterations = 300;
for (int i = 0; i < numIterations; ++i)
{
simpleMultiply<w><<<numBlocksPerGrid, numThreadsPerBlock>>>(d_a, d_b, d_c, N);
}
// Record an event after kernel invocations.
cudaEventRecord(stop);
// Wait for the event to complete.
cudaEventSynchronize(stop);
// Compute the elapsed time between two events.
float elapsed;
cudaEventElapsedTime(&elapsed, start, stop);
// Compute and print the GFLOP/s performance metric.
printf("%.2f GFLOP/s\n", (2.0f * M * N * w * numIterations * 1e-9f) / (elapsed / 1000.0f));
// Copy matrix c from device memory to host memory synchronously.
cudaMemcpy(h_c, d_c, numBytesC, cudaMemcpyDeviceToHost);
// Validate the result.
for (int i = 0; i < numElementsC; ++i)
{
float actual = h_c[i];
float expected = h_r[i];
if (fabs(actual - expected) / w > 1e-6)
{
printf("h_c[%d] = %f, expected = %f\n", i, actual, expected);
break;
}
}
// Cleanup.
cudaFree(d_c);
cudaFree(d_b);
cudaFree(d_a);
cudaDeviceReset();
free(h_r);
free(h_c);
free(h_b);
free(h_a);
}
|
07efbfba5a835c32c2e581be4bdc8b306f5e27bd.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include "matrix/math.cuh"
#include "random/rng.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace Matrix {
template <typename Type>
__global__ void nativePowerKernel(Type *in, Type *out, int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
out[idx] = in[idx] * in[idx];
}
}
template <typename Type>
void naivePower(Type *in, Type *out, int len, hipStream_t stream) {
static const int TPB = 64;
int nblks = ceildiv(len, TPB);
hipLaunchKernelGGL(( nativePowerKernel<Type>), dim3(nblks), dim3(TPB), 0, stream, in, out, len);
CUDA_CHECK(hipPeekAtLastError());
}
template <typename Type>
__global__ void nativeSqrtKernel(Type *in, Type *out, int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
out[idx] = sqrt(in[idx]);
}
}
template <typename Type>
void naiveSqrt(Type *in, Type *out, int len) {
static const int TPB = 64;
int nblks = ceildiv(len, TPB);
hipLaunchKernelGGL(( nativeSqrtKernel<Type>), dim3(nblks), dim3(TPB), 0, 0, in, out, len);
CUDA_CHECK(hipPeekAtLastError());
}
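// Reference sign flip: one block per column; if the entry with the largest
// absolute value in a column is negative, the whole column is negated.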
template <typename Type>
__global__ void naiveSignFlipKernel(Type *in, Type *out, int rowCount,
int colCount) {
int d_i = blockIdx.x * rowCount;
int end = d_i + rowCount;
if (blockIdx.x < colCount) {
Type max = 0.0;
int max_index = 0;
for (int i = d_i; i < end; i++) {
Type val = in[i];
if (val < 0.0) {
val = -val;
}
if (val > max) {
max = val;
max_index = i;
}
}
for (int i = d_i; i < end; i++) {
if (in[max_index] < 0.0) {
out[i] = -in[i];
} else {
out[i] = in[i];
}
}
}
__syncthreads();
}
template <typename Type>
void naiveSignFlip(Type *in, Type *out, int rowCount, int colCount) {
hipLaunchKernelGGL(( naiveSignFlipKernel<Type>), dim3(colCount), dim3(1), 0, 0, in, out, rowCount, colCount);
CUDA_CHECK(hipPeekAtLastError());
}
template <typename T>
struct MathInputs {
T tolerance;
int n_row;
int n_col;
int len;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const MathInputs<T> &dims) {
return os;
}
template <typename T>
class MathTest : public ::testing::TestWithParam<MathInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MathInputs<T>>::GetParam();
Random::Rng r(params.seed);
int len = params.len;
allocate(in_power, len);
allocate(out_power_ref, len);
allocate(in_sqrt, len);
allocate(out_sqrt_ref, len);
allocate(in_sign_flip, len);
allocate(out_sign_flip_ref, len);
hipStream_t stream;
CUDA_CHECK(hipStreamCreate(&stream));
allocator.reset(new defaultDeviceAllocator);
allocate(in_ratio, 4);
T in_ratio_h[4] = {1.0, 2.0, 2.0, 3.0};
updateDevice(in_ratio, in_ratio_h, 4, stream);
allocate(out_ratio_ref, 4);
T out_ratio_ref_h[4] = {0.125, 0.25, 0.25, 0.375};
updateDevice(out_ratio_ref, out_ratio_ref_h, 4, stream);
r.uniform(in_power, len, T(-1.0), T(1.0), stream);
r.uniform(in_sqrt, len, T(0.0), T(1.0), stream);
// r.uniform(in_ratio, len, T(0.0), T(1.0));
r.uniform(in_sign_flip, len, T(-100.0), T(100.0), stream);
naivePower(in_power, out_power_ref, len, stream);
power(in_power, len, stream);
naiveSqrt(in_sqrt, out_sqrt_ref, len);
seqRoot(in_sqrt, len, stream);
ratio(in_ratio, in_ratio, 4, allocator, stream);
naiveSignFlip(in_sign_flip, out_sign_flip_ref, params.n_row, params.n_col);
signFlip(in_sign_flip, params.n_row, params.n_col, stream);
allocate(in_recip, 4);
allocate(in_recip_ref, 4);
allocate(out_recip, 4);
// default threshold is 1e-15
std::vector<T> in_recip_h = {0.1, 0.01, -0.01, 0.1e-16};
std::vector<T> in_recip_ref_h = {10.0, 100.0, -100.0, 0.0};
updateDevice(in_recip, in_recip_h.data(), 4, stream);
updateDevice(in_recip_ref, in_recip_ref_h.data(), 4, stream);
T recip_scalar = T(1.0);
// this `reciprocal()` call has to go first because the next one modifies its input
reciprocal(in_recip, out_recip, recip_scalar, 4, stream);
reciprocal(in_recip, recip_scalar, 4, stream, true);
std::vector<T> in_small_val_zero_h = {0.1, 1e-16, -1e-16, -0.1};
std::vector<T> in_small_val_zero_ref_h = {0.1, 0.0, 0.0, -0.1};
allocate(in_smallzero, 4);
allocate(out_smallzero, 4);
allocate(out_smallzero_ref, 4);
updateDevice(in_smallzero, in_small_val_zero_h.data(), 4, stream);
updateDevice(out_smallzero_ref, in_small_val_zero_ref_h.data(), 4, stream);
setSmallValuesZero(out_smallzero, in_smallzero, 4, stream);
setSmallValuesZero(in_smallzero, 4, stream);
CUDA_CHECK(hipStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(hipFree(in_power));
CUDA_CHECK(hipFree(out_power_ref));
CUDA_CHECK(hipFree(in_sqrt));
CUDA_CHECK(hipFree(out_sqrt_ref));
CUDA_CHECK(hipFree(in_ratio));
CUDA_CHECK(hipFree(out_ratio_ref));
CUDA_CHECK(hipFree(in_sign_flip));
CUDA_CHECK(hipFree(out_sign_flip_ref));
CUDA_CHECK(hipFree(in_recip));
CUDA_CHECK(hipFree(in_recip_ref));
CUDA_CHECK(hipFree(out_recip));
CUDA_CHECK(hipFree(in_smallzero));
CUDA_CHECK(hipFree(out_smallzero));
CUDA_CHECK(hipFree(out_smallzero_ref));
}
protected:
MathInputs<T> params;
T *in_power, *out_power_ref, *in_sqrt, *out_sqrt_ref, *in_ratio,
*out_ratio_ref, *in_sign_flip, *out_sign_flip_ref, *in_recip, *in_recip_ref,
*out_recip, *in_smallzero, *out_smallzero, *out_smallzero_ref;
std::shared_ptr<deviceAllocator> allocator;
};
const std::vector<MathInputs<float>> inputsf = {
{0.00001f, 1024, 1024, 1024 * 1024, 1234ULL}};
const std::vector<MathInputs<double>> inputsd = {
{0.00001, 1024, 1024, 1024 * 1024, 1234ULL}};
typedef MathTest<float> MathPowerTestF;
TEST_P(MathPowerTestF, Result) {
ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathPowerTestD;
TEST_P(MathPowerTestD, Result) {
ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathSqrtTestF;
TEST_P(MathSqrtTestF, Result) {
ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathSqrtTestD;
TEST_P(MathSqrtTestD, Result) {
ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathRatioTestF;
TEST_P(MathRatioTestF, Result) {
ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathRatioTestD;
TEST_P(MathRatioTestD, Result) {
ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathSignFlipTestF;
TEST_P(MathSignFlipTestF, Result) {
ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathSignFlipTestD;
TEST_P(MathSignFlipTestD, Result) {
ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathReciprocalTestF;
TEST_P(MathReciprocalTestF, Result) {
ASSERT_TRUE(devArrMatch(in_recip, in_recip_ref, 4,
CompareApprox<float>(params.tolerance)));
// The 4th element exercises the `setzero=true` path, which the out-of-place `reciprocal` call does not take, so only the first 3 elements are compared.
ASSERT_TRUE(devArrMatch(out_recip, in_recip_ref, 3,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathReciprocalTestD;
TEST_P(MathReciprocalTestD, Result) {
ASSERT_TRUE(devArrMatch(in_recip, in_recip_ref, 4,
CompareApprox<double>(params.tolerance)));
// The 4th element exercises the `setzero=true` path, which the out-of-place `reciprocal` call does not take, so only the first 3 elements are compared.
ASSERT_TRUE(devArrMatch(out_recip, in_recip_ref, 3,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathSetSmallZeroTestF;
TEST_P(MathSetSmallZeroTestF, Result) {
ASSERT_TRUE(devArrMatch(in_smallzero, out_smallzero_ref, 4,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_smallzero, out_smallzero_ref, 4,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathSetSmallZeroTestD;
TEST_P(MathSetSmallZeroTestD, Result) {
ASSERT_TRUE(devArrMatch(in_smallzero, out_smallzero_ref, 4,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_smallzero, out_smallzero_ref, 4,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestD, ::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathReciprocalTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathReciprocalTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathSetSmallZeroTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathSetSmallZeroTestD,
::testing::ValuesIn(inputsd));
} // end namespace Matrix
} // end namespace MLCommon
| 07efbfba5a835c32c2e581be4bdc8b306f5e27bd.cu | /*
* Copyright (c) 2018, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <common/cudart_utils.h>
#include <gtest/gtest.h>
#include "matrix/math.cuh"
#include "random/rng.cuh"
#include "test_utils.h"
namespace MLCommon {
namespace Matrix {
template <typename Type>
__global__ void nativePowerKernel(Type *in, Type *out, int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
out[idx] = in[idx] * in[idx];
}
}
template <typename Type>
void naivePower(Type *in, Type *out, int len, cudaStream_t stream) {
static const int TPB = 64;
int nblks = ceildiv(len, TPB);
nativePowerKernel<Type><<<nblks, TPB, 0, stream>>>(in, out, len);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename Type>
__global__ void nativeSqrtKernel(Type *in, Type *out, int len) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if (idx < len) {
out[idx] = sqrt(in[idx]);
}
}
template <typename Type>
void naiveSqrt(Type *in, Type *out, int len) {
static const int TPB = 64;
int nblks = ceildiv(len, TPB);
nativeSqrtKernel<Type><<<nblks, TPB>>>(in, out, len);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename Type>
__global__ void naiveSignFlipKernel(Type *in, Type *out, int rowCount,
int colCount) {
int d_i = blockIdx.x * rowCount;
int end = d_i + rowCount;
if (blockIdx.x < colCount) {
Type max = 0.0;
int max_index = 0;
for (int i = d_i; i < end; i++) {
Type val = in[i];
if (val < 0.0) {
val = -val;
}
if (val > max) {
max = val;
max_index = i;
}
}
for (int i = d_i; i < end; i++) {
if (in[max_index] < 0.0) {
out[i] = -in[i];
} else {
out[i] = in[i];
}
}
}
__syncthreads();
}
template <typename Type>
void naiveSignFlip(Type *in, Type *out, int rowCount, int colCount) {
naiveSignFlipKernel<Type><<<colCount, 1>>>(in, out, rowCount, colCount);
CUDA_CHECK(cudaPeekAtLastError());
}
template <typename T>
struct MathInputs {
T tolerance;
int n_row;
int n_col;
int len;
unsigned long long int seed;
};
template <typename T>
::std::ostream &operator<<(::std::ostream &os, const MathInputs<T> &dims) {
return os;
}
template <typename T>
class MathTest : public ::testing::TestWithParam<MathInputs<T>> {
protected:
void SetUp() override {
params = ::testing::TestWithParam<MathInputs<T>>::GetParam();
Random::Rng r(params.seed);
int len = params.len;
allocate(in_power, len);
allocate(out_power_ref, len);
allocate(in_sqrt, len);
allocate(out_sqrt_ref, len);
allocate(in_sign_flip, len);
allocate(out_sign_flip_ref, len);
cudaStream_t stream;
CUDA_CHECK(cudaStreamCreate(&stream));
allocator.reset(new defaultDeviceAllocator);
allocate(in_ratio, 4);
T in_ratio_h[4] = {1.0, 2.0, 2.0, 3.0};
updateDevice(in_ratio, in_ratio_h, 4, stream);
allocate(out_ratio_ref, 4);
T out_ratio_ref_h[4] = {0.125, 0.25, 0.25, 0.375};
updateDevice(out_ratio_ref, out_ratio_ref_h, 4, stream);
r.uniform(in_power, len, T(-1.0), T(1.0), stream);
r.uniform(in_sqrt, len, T(0.0), T(1.0), stream);
// r.uniform(in_ratio, len, T(0.0), T(1.0));
r.uniform(in_sign_flip, len, T(-100.0), T(100.0), stream);
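    // For each primitive below: build the expected output with the naive
    // reference kernel, then run the Matrix primitive (in place where it
    // mutates its input) so the TEST_P bodies can compare both with devArrMatch.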
naivePower(in_power, out_power_ref, len, stream);
power(in_power, len, stream);
naiveSqrt(in_sqrt, out_sqrt_ref, len);
seqRoot(in_sqrt, len, stream);
ratio(in_ratio, in_ratio, 4, allocator, stream);
naiveSignFlip(in_sign_flip, out_sign_flip_ref, params.n_row, params.n_col);
signFlip(in_sign_flip, params.n_row, params.n_col, stream);
allocate(in_recip, 4);
allocate(in_recip_ref, 4);
allocate(out_recip, 4);
// default threshold is 1e-15
std::vector<T> in_recip_h = {0.1, 0.01, -0.01, 0.1e-16};
std::vector<T> in_recip_ref_h = {10.0, 100.0, -100.0, 0.0};
updateDevice(in_recip, in_recip_h.data(), 4, stream);
updateDevice(in_recip_ref, in_recip_ref_h.data(), 4, stream);
T recip_scalar = T(1.0);
// this `reciprocal()` has to go first bc next one modifies its input
reciprocal(in_recip, out_recip, recip_scalar, 4, stream);
reciprocal(in_recip, recip_scalar, 4, stream, true);
std::vector<T> in_small_val_zero_h = {0.1, 1e-16, -1e-16, -0.1};
std::vector<T> in_small_val_zero_ref_h = {0.1, 0.0, 0.0, -0.1};
allocate(in_smallzero, 4);
allocate(out_smallzero, 4);
allocate(out_smallzero_ref, 4);
updateDevice(in_smallzero, in_small_val_zero_h.data(), 4, stream);
updateDevice(out_smallzero_ref, in_small_val_zero_ref_h.data(), 4, stream);
setSmallValuesZero(out_smallzero, in_smallzero, 4, stream);
setSmallValuesZero(in_smallzero, 4, stream);
CUDA_CHECK(cudaStreamDestroy(stream));
}
void TearDown() override {
CUDA_CHECK(cudaFree(in_power));
CUDA_CHECK(cudaFree(out_power_ref));
CUDA_CHECK(cudaFree(in_sqrt));
CUDA_CHECK(cudaFree(out_sqrt_ref));
CUDA_CHECK(cudaFree(in_ratio));
CUDA_CHECK(cudaFree(out_ratio_ref));
CUDA_CHECK(cudaFree(in_sign_flip));
CUDA_CHECK(cudaFree(out_sign_flip_ref));
CUDA_CHECK(cudaFree(in_recip));
CUDA_CHECK(cudaFree(in_recip_ref));
CUDA_CHECK(cudaFree(out_recip));
CUDA_CHECK(cudaFree(in_smallzero));
CUDA_CHECK(cudaFree(out_smallzero));
CUDA_CHECK(cudaFree(out_smallzero_ref));
}
protected:
MathInputs<T> params;
T *in_power, *out_power_ref, *in_sqrt, *out_sqrt_ref, *in_ratio,
*out_ratio_ref, *in_sign_flip, *out_sign_flip_ref, *in_recip, *in_recip_ref,
*out_recip, *in_smallzero, *out_smallzero, *out_smallzero_ref;
std::shared_ptr<deviceAllocator> allocator;
};
const std::vector<MathInputs<float>> inputsf = {
{0.00001f, 1024, 1024, 1024 * 1024, 1234ULL}};
const std::vector<MathInputs<double>> inputsd = {
{0.00001, 1024, 1024, 1024 * 1024, 1234ULL}};
typedef MathTest<float> MathPowerTestF;
TEST_P(MathPowerTestF, Result) {
ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathPowerTestD;
TEST_P(MathPowerTestD, Result) {
ASSERT_TRUE(devArrMatch(in_power, out_power_ref, params.len,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathSqrtTestF;
TEST_P(MathSqrtTestF, Result) {
ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathSqrtTestD;
TEST_P(MathSqrtTestD, Result) {
ASSERT_TRUE(devArrMatch(in_sqrt, out_sqrt_ref, params.len,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathRatioTestF;
TEST_P(MathRatioTestF, Result) {
ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathRatioTestD;
TEST_P(MathRatioTestD, Result) {
ASSERT_TRUE(devArrMatch(in_ratio, out_ratio_ref, 4,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathSignFlipTestF;
TEST_P(MathSignFlipTestF, Result) {
ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathSignFlipTestD;
TEST_P(MathSignFlipTestD, Result) {
ASSERT_TRUE(devArrMatch(in_sign_flip, out_sign_flip_ref, params.len,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathReciprocalTestF;
TEST_P(MathReciprocalTestF, Result) {
ASSERT_TRUE(devArrMatch(in_recip, in_recip_ref, 4,
CompareApprox<float>(params.tolerance)));
// 4-th term tests `setzero=true` functionality, not present in this version of `reciprocal`.
ASSERT_TRUE(devArrMatch(out_recip, in_recip_ref, 3,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathReciprocalTestD;
TEST_P(MathReciprocalTestD, Result) {
ASSERT_TRUE(devArrMatch(in_recip, in_recip_ref, 4,
CompareApprox<double>(params.tolerance)));
// 4-th term tests `setzero=true` functionality, not present in this version of `reciprocal`.
ASSERT_TRUE(devArrMatch(out_recip, in_recip_ref, 3,
CompareApprox<double>(params.tolerance)));
}
typedef MathTest<float> MathSetSmallZeroTestF;
TEST_P(MathSetSmallZeroTestF, Result) {
ASSERT_TRUE(devArrMatch(in_smallzero, out_smallzero_ref, 4,
CompareApprox<float>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_smallzero, out_smallzero_ref, 4,
CompareApprox<float>(params.tolerance)));
}
typedef MathTest<double> MathSetSmallZeroTestD;
TEST_P(MathSetSmallZeroTestD, Result) {
ASSERT_TRUE(devArrMatch(in_smallzero, out_smallzero_ref, 4,
CompareApprox<double>(params.tolerance)));
ASSERT_TRUE(devArrMatch(out_smallzero, out_smallzero_ref, 4,
CompareApprox<double>(params.tolerance)));
}
INSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathPowerTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestF, ::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathSqrtTestD, ::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathRatioTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathSignFlipTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathReciprocalTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathReciprocalTestD,
::testing::ValuesIn(inputsd));
INSTANTIATE_TEST_CASE_P(MathTests, MathSetSmallZeroTestF,
::testing::ValuesIn(inputsf));
INSTANTIATE_TEST_CASE_P(MathTests, MathSetSmallZeroTestD,
::testing::ValuesIn(inputsd));
} // end namespace Matrix
} // end namespace MLCommon
|
f33604afbebf3c6c9dde4a0d9fcf1e7450446131.hip | // !!! This is a file automatically generated by hipify!!!
/*
Authors
Alexander Freudenberg, [email protected]
Copyright (C) 2022-2023 Alexander Freudenberg
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <chrono>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <rocblas.h>
#include <thrust/fill.h>
#include <thrust/device_vector.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <time.h>
#include <unistd.h>
#include "dgemm_compressed_cuda.h"
#include "cuda_utils.h"
//
// plink2gpu function
//
int plink2gpu(char *plink, char *plink_transposed, int snps,
int indiv, double *f, int n, void **GPU_obj) {
/*
  Moves the SNP matrix, its transpose and the corresponding allele frequencies to the
  device and stores the pointer to this data in a separate object, a pointer
  to which is then returned.
  Parameters:
    plink: Pointer to SNP matrix in plink format of size ceil(indiv/4) * snps + 3
    plink_transposed: Pointer to transposed SNP matrix in plink format
f: Pointer to vector of allele frequencies
snps: Pointer to number of snps
indiv: Pointer to number of individuals
GPU_obj: void pointer to a pointer in which the GPU object is stored
*/
// Print compile info
print_compile_info("dgemm_compressed");
//
// Initialize CUDA variables
//
hipError_t err;
uint8_t *d_plink, *d_plink_transposed;
double *d_f, *d_unit, *d_C, *d_D;
double *d_B;
long n_bytes_per_snp =
(indiv - 1) / 4 + 1; // number of columns of Z if individuals
// are zero-padded to be a multiple of 4
long n_bytes_per_indiv =
(snps - 1) / 4 +
1; // number of columns of Z^T if SNPs are zero-padded to be a multiple of 4
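  // Example: with indiv = 1000, n_bytes_per_snp = (1000 - 1) / 4 + 1 = 250,
  // since the plink format packs 4 two-bit genotypes into each byte.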
long size_buffer = 4 * ((long(max(snps, indiv)) - 1) / 4 + 1) * long(n);
// Maximal size of the matrices B and C on the device
// Matrices are forced to have a number of rows which is a multiple of 4 by
// zero-padding This allows us to deal with SNP matrices with unaligned
// dimensions which are themselves zero-padded
debug_info("Dimensions: (%d,%d), size in bytes: %ld, size_t %d", snps, indiv, n_bytes_per_snp * long(snps) + long(indiv) * n_bytes_per_indiv, sizeof(size_t));
if(checkCuda() != 0){
return 1;
}
int device = switchDevice();
if(device == -1){
return 1;
}
// Check if enough memory is available
size_t required_mem = 3 * size_buffer * sizeof(double) + n_bytes_per_snp * long(snps) +
n_bytes_per_indiv * long(indiv);
if (checkDevMemory(required_mem) != 0) {
return 1;
}
//
// Allocate device memory
//
err = hipMalloc((void **)&d_plink, n_bytes_per_snp * long(snps));
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = hipMalloc((void **)&d_plink_transposed, n_bytes_per_indiv * long(indiv));
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = hipMalloc((void **)&d_f, sizeof(double) * snps);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = hipMalloc((void **)&d_unit, sizeof(double) * indiv);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = hipMalloc((void **)&d_B, sizeof(double) * size_buffer);
if (checkError(__func__, __LINE__, err) != 0)
return 1;
err = hipMalloc((void **)&d_C, sizeof(double) * size_buffer);
if (checkError(__func__, __LINE__, err) != 0)
return 1;
err = hipMalloc((void **)&d_D, sizeof(double) * size_buffer);
if (checkError(__func__, __LINE__, err) != 0)
return 1;
//
// Copy data to device
//
err = hipMemcpy(d_plink, plink, long(n_bytes_per_snp) * long(snps), hipMemcpyHostToDevice);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = hipMemcpy(d_plink_transposed, plink_transposed, long(n_bytes_per_indiv) * long(indiv),
hipMemcpyHostToDevice);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = hipMemcpy(d_f, f, sizeof(double) * long(snps), hipMemcpyHostToDevice);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
// Fill d_unit with 1.0s
thrust::device_ptr<double> d_unit_thrust(d_unit);
thrust::fill(d_unit_thrust, d_unit_thrust + indiv, 1.0);
err = hipGetLastError();
if (checkError(__func__, __LINE__, err) != 0)
return (1);
//
// Initialize GPU_gemm_storage object
//
struct GPU_gemm_storage *GPU_storage_obj =
(struct GPU_gemm_storage *)malloc(sizeof(struct GPU_gemm_storage));
GPU_storage_obj->d_plink = d_plink;
GPU_storage_obj->d_plink_transposed = d_plink_transposed;
GPU_storage_obj->d_f = d_f;
GPU_storage_obj->d_unit = d_unit;
GPU_storage_obj->d_B = d_B;
GPU_storage_obj->d_C = d_C;
GPU_storage_obj->d_D = d_D;
GPU_storage_obj->size_buffer = size_buffer;
GPU_storage_obj->snps = snps;
GPU_storage_obj->indiv = indiv;
GPU_storage_obj->device = device;
debug_info("Pointer d_plink %d, d_plink_transposed %d", d_plink, d_plink_transposed);
// Set pointer to initialized object
*GPU_obj = (void *)GPU_storage_obj;
return 0;
}
//
// freegpu function
//
int freegpu(void **GPU_obj){
hipError_t err;
if(checkCuda() != 0){
return 1;
}
if(*GPU_obj == NULL){
return 1;
}
  // Free device memory and dereference the storage object
struct GPU_gemm_storage *GPU_storage_obj = (struct GPU_gemm_storage *) (*GPU_obj);
err = hipFree(GPU_storage_obj->d_plink);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = hipFree(GPU_storage_obj->d_plink_transposed);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = hipFree(GPU_storage_obj->d_f);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = hipFree(GPU_storage_obj->d_unit);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = hipFree(GPU_storage_obj->d_B);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = hipFree(GPU_storage_obj->d_C);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = hipFree(GPU_storage_obj->d_D);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
free(GPU_storage_obj);
GPU_obj = NULL;
return 0;
}
//
// dgemm_compressed_gpu function
//
int dgemm_compressed_gpu(bool transA, void *GPU_obj, int n, double *B, int ldb,
int centered, int normalized, double *C, int ldc) {
/*
Performs one of the operations
C <- alpha * op(M - 2 * 1_indiv f^T) * op(B) + beta * C
on the GPU, where
op(X) = X or op(X) = X^T,
alpha and beta are scalars,
M is a compressed genotype matrix of dimensions indiv times snps stored in row-major,
f is a vector of allele frequencies of length snps,
op(B) is a matrix of double precision and number of rows equal to number of columns of op(M)
C is a matrix of double precision
Parameters:
transa: Specifies the form of op(M) used in the matrix multiplication. If
transa = true, then op(M) = M^T. If transa = false, then op(M) =
M.
m: Specifies the number of rows of op(M - 2* 1_k*f^T) and C
n: Specifies the number of columns of op(B) and C
    k: Specifies the number of columns of op(M - 2* 1_k*f^T) and rows of op(B)
    k1: Specifies the number of columns of op(M) in compressed format
    alpha: Not supported currently, only alpha=1 is allowed. Specifies the scalar alpha
    Storage: Struct which stores device pointers to both M and its transposed
        as well as a device pointer to the vector of allele frequencies f
lda: Not supported currently
B: Specifies the matrix B
ldb: Not supported currently
beta: Specifies the scalar beta. When beta is
equal to zero, then C need not be set on input.
C: Specifies the matrix C
ldc: Not supported currently
  A further boost in performance can be achieved if the whole PCG is transferred
to the GPU to avoid data movement.
*/
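  // Overview of the steps below: (1) zero the device buffers and copy B to the
  // device with its rows zero-padded to a multiple of 4, (2) run a CUTLASS GEMM
  // of the packed SNP matrix with B (row-major result in d_C), (3) transpose the
  // result into column-major d_D with hipblasDgeam, (4) optionally apply the
  // centering correction -2 * op(1_indiv f^T) B via hipblasDgemv/hipblasDaxpy,
  // and (5) copy the result back to the host.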
//
// Initialization
//
// Initialize helper variables
hipError_t err;
hipblasStatus_t cublas_status;
hipblasHandle_t cublas_handle;
struct GPU_gemm_storage *GPU_storage_obj = (struct GPU_gemm_storage *) GPU_obj;
// Initialize device pointer for M
// cutlass function assumes row major for M and PLINK bed uses SNP major, hence
// pointer plink is needed for transA = 't' and plink_transposed if transA =
// 'N'
uint8_t *d_M = transA ? GPU_storage_obj->d_plink : GPU_storage_obj->d_plink_transposed;
cutlass::u4f64_t *d_B = reinterpret_cast<cutlass::u4f64_t *>(GPU_storage_obj->d_B);
double *d_f = GPU_storage_obj->d_f,
*d_unit = GPU_storage_obj->d_unit;
double *d_C = GPU_storage_obj->d_C;
double *d_D = GPU_storage_obj->d_D;
long m = transA ? GPU_storage_obj->snps : GPU_storage_obj->indiv;
long k = transA ? GPU_storage_obj->indiv : GPU_storage_obj->snps;
long size_buffer = GPU_storage_obj->size_buffer;
long k1 = (k - 1) / 4 + 1;
debug_info("\tEntering GPU multiplication\n");
debug_info("Pointer: d_M %d, Dimensions: m %ld, k %ld, k1 %ld, n %ld", d_M, m, k, k1, n);
const double alpha = 1.0,
alpha_n2 = -2.0,
beta = 0.0;
double *d_workspace = NULL;
// Check CUDA installation
if(checkCuda() != 0){
return 1;
}
// Create cuBLAS handle
cublas_status = hipblasCreate(&cublas_handle);
if (checkError(__func__, __LINE__, cublas_status) != 0)
return 1;
// Zero-fill matrices C and B to avoid spurious results
err = hipMemset(d_B, 0, sizeof(double) * size_buffer);
if (checkError(__func__, __LINE__, err) != 0)
return 1;
  err = hipMemset(d_C, 0, sizeof(double) * size_buffer);
  if (checkError(__func__, __LINE__, err) != 0)
return 1;
err = hipMemset(d_D, 0, sizeof(double) * size_buffer);
if (checkError(__func__, __LINE__, err) != 0)
return 1;
// Copy data to device
debug_info("Memcpy dstpitch %d, srcpitch %d, width %d, height %d",
sizeof(double) * 4 * ((k - 1) / 4 + 1), sizeof(double) * k,
sizeof(double) * k, sizeof(double) * n);
err = hipMemcpy2D(d_B, sizeof(double) * 4 * ((k - 1) / 4 + 1), B,
sizeof(double) * k, sizeof(double) * k, n,
hipMemcpyHostToDevice);
if (checkError(__func__, __LINE__, err) != 0)
return 1;
//
// SNP matrix multiplication
// The following section multiplies the SNP matrix with a vector of doubles in
// cutlass
//
// Create a problem size struct for matrix multiplication
cutlass::gemm::GemmCoord problem_size_packed(m, n, k1);
// Declare Gemm problem
using CutlassGemm = typename cutlass::gemm::device::Gemm<
uint8_t, cutlass::layout::RowMajor, cutlass::u4f64_t,
cutlass::layout::ColumnMajor, double, cutlass::layout::RowMajor, double,
cutlass::arch::OpClassSimt,
      cutlass::arch::Sm61, // TODO: Check template hierarchy if this is required
cutlass::gemm::GemmShape<32, 32, 16>, // Do not change
cutlass::gemm::GemmShape<16, 16, 16>, // Might be tuned but must be
// smaller than ThreadblockShape
cutlass::gemm::GemmShape<1, 1, 1>, // Do not change
cutlass::epilogue::thread::LinearCombination<double, 1, double, double>,
typename cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2,
1, 1, false,
cutlass::arch::OpMultiplyAdd // Operator
>;
debug_info("Size: %ld", k1 * n);
// Define CUTLASS GEMM arguments
typename CutlassGemm::Arguments arguments{
problem_size_packed, // problem size of matrix multiplication
{d_M, k1}, // reference to matrix M on device
{d_B, k1}, // reference to matrix B on device
{d_C, n}, // reference to matrix C on device
{d_C, n}, // reference to matrix D on device
{alpha, beta} // tuple of alpha and beta
};
// Calculate CUTLASS GEMM workspace size
size_t workspace_size = CutlassGemm::get_workspace_size(arguments);
// Allocate workspace memory
hipMalloc((void **)&d_workspace, max(workspace_size, sizeof(double) * n));
// Instantiate CUTLASS kernel depending on templates
CutlassGemm gemm_op;
// Test if problem can be implemented
cutlass::Status status = gemm_op.can_implement(arguments);
if (status != cutlass::Status::kSuccess)
printf("Can't implement\n");
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, d_workspace);
if (status != cutlass::Status::kSuccess)
printf("Error in initialization\n");
// Launch initialized CUTLASS kernel
status = gemm_op(); // Actual gemm op
hipDeviceSynchronize();
if (status != cutlass::Status::kSuccess)
printf("Operation error %d\n", (int) status);
// Catch all accumulated errors from previous cuda launches
err = hipGetLastError();
if (checkError(__func__, __LINE__, err) != 0)
return (1);
//
// Transpose
// Switch to column major
//
cublas_status = hipblasDgeam(cublas_handle, //
HIPBLAS_OP_T, // C needs to be transposed
HIPBLAS_OP_N, // No-op on B
m, // Number of rows of C after transposing
n, // Number of columns of C after transposing
&alpha, // alpha
d_C, // matrix A
n, // lda
&beta, // beta
d_D, // matrix B
m, // ldb
d_D, // matrix C
m); // ldb
if (checkError(__func__, __LINE__, cublas_status) != 0)
return (1);
//
// Genotype centering
  // The following section performs genotype centering by subtracting op(2 *
// 1_indiv * f^T ) B from C
//
// dgemv performs C <- alpha op(A) x + beta y
// We calculate f^T B if transa = true or 1_k^T B if transa = false
// cuBLAS only supports op(A) x, so we calculate B^T f (B^T 1_k resp.), which
// returns the same as the result is a vector
// B is of dimension (snps, n) if
// transa = 'N' and of dimension (indiv,n) if transa = true cuBLAS assumes
// column-major and d_B is stored in column-major, hence trans = HIPBLAS_OP_T
switch(centered){
case 0: break;
case 1:
{
debug_info("Centering: B^f");
cublas_status = hipblasDgemv(cublas_handle, // handle
HIPBLAS_OP_T, // trans
k, // number of rows of A
n, // number of cols of A
&alpha_n2, // alpha
reinterpret_cast<double *>(d_B), // matrix A
((k - 1) / 4 + 1) * 4, // lda
transA ? d_unit : d_f, // vector x
1, // incx
&beta, // beta
d_workspace, // vector y
1); // incy
if (checkError(__func__, __LINE__, cublas_status) != 0)
return (1);
debug_info("Centering: C");
    // Now every column i is scaled with alpha_i 1_k if transa = true or alpha_i
// f if transa = false, where alpha_i = B_i^T f (alpha_i = B_i^T 1_k resp)
cublas_status =
hipblasSetPointerMode(cublas_handle, HIPBLAS_POINTER_MODE_DEVICE);
if (checkError(__func__, __LINE__, cublas_status) != 0)
return (1);
for (int i = 0; i < n; i++) {
cublas_status = hipblasDaxpy(cublas_handle, // handle
m, // number of rows
d_workspace + i, // alpha
transA ? d_f : d_unit, // x
1, // incx
d_D + i * m, // y
1); // incy
if (checkError(__func__, __LINE__, cublas_status) != 0)
return (1);
}
break;
}
default: checkError(__func__, __LINE__, hipErrorInvalidValue); return 1;
}
//
// Wrap-up
//
// Copy results back to the host
debug_info("Copy back");
err = hipMemcpy(C, d_D, sizeof(double) * m * n, hipMemcpyDeviceToHost);
if (checkError(__func__, __LINE__, err) != 0)
return 1;
debug_info("Free pointers");
hipDeviceSynchronize();
cublas_status = hipblasDestroy(cublas_handle);
if (checkError(__func__, __LINE__, cublas_status) != 0)
return (1);
hipFree(d_workspace);
// debug_info("C: ");
// for(int i = 0; i < 10; i++){
// debug_info("%f ", C[i]);
// }
debug_info("Return");
return 0;
}
| f33604afbebf3c6c9dde4a0d9fcf1e7450446131.cu | /*
Authors
Alexander Freudenberg, [email protected]
Copyright (C) 2022-2023 Alexander Freudenberg
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
#include <chrono>
#include <cuda.h>
#include <cuda_runtime.h>
#include <cuda_runtime_api.h>
#include <cublas_v2.h>
#include <thrust/fill.h>
#include <thrust/device_vector.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string>
#include <time.h>
#include <unistd.h>
#include "dgemm_compressed_cuda.h"
#include "cuda_utils.h"
//
// plink2gpu function
//
int plink2gpu(char *plink, char *plink_transposed, int snps,
int indiv, double *f, int n, void **GPU_obj) {
/*
  Moves the SNP matrix, its transpose and the corresponding allele frequencies to the
  device and stores the pointer to this data in a separate object, a pointer
  to which is then returned.
  Parameters:
    plink: Pointer to SNP matrix in plink format of size ceil(indiv/4) * snps + 3
    plink_transposed: Pointer to transposed SNP matrix in plink format
f: Pointer to vector of allele frequencies
snps: Pointer to number of snps
indiv: Pointer to number of individuals
GPU_obj: void pointer to a pointer in which the GPU object is stored
*/
// Print compile info
print_compile_info("dgemm_compressed");
//
// Initialize CUDA variables
//
cudaError_t err;
uint8_t *d_plink, *d_plink_transposed;
double *d_f, *d_unit, *d_C, *d_D;
double *d_B;
long n_bytes_per_snp =
(indiv - 1) / 4 + 1; // number of columns of Z if individuals
// are zero-padded to be a multiple of 4
long n_bytes_per_indiv =
(snps - 1) / 4 +
1; // number of columns of Z^T if SNPs are zero-padded to be a multiple of 4
long size_buffer = 4 * ((long(max(snps, indiv)) - 1) / 4 + 1) * long(n);
// Maximal size of the matrices B and C on the device
// Matrices are forced to have a number of rows which is a multiple of 4 by
// zero-padding This allows us to deal with SNP matrices with unaligned
// dimensions which are themselves zero-padded
debug_info("Dimensions: (%d,%d), size in bytes: %ld, size_t %d", snps, indiv, n_bytes_per_snp * long(snps) + long(indiv) * n_bytes_per_indiv, sizeof(size_t));
if(checkCuda() != 0){
return 1;
}
int device = switchDevice();
if(device == -1){
return 1;
}
// Check if enough memory is available
size_t required_mem = 3 * size_buffer * sizeof(double) + n_bytes_per_snp * long(snps) +
n_bytes_per_indiv * long(indiv);
if (checkDevMemory(required_mem) != 0) {
return 1;
}
//
// Allocate device memory
//
err = cudaMalloc((void **)&d_plink, n_bytes_per_snp * long(snps));
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = cudaMalloc((void **)&d_plink_transposed, n_bytes_per_indiv * long(indiv));
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = cudaMalloc((void **)&d_f, sizeof(double) * snps);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = cudaMalloc((void **)&d_unit, sizeof(double) * indiv);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = cudaMalloc((void **)&d_B, sizeof(double) * size_buffer);
if (checkError(__func__, __LINE__, err) != 0)
return 1;
err = cudaMalloc((void **)&d_C, sizeof(double) * size_buffer);
if (checkError(__func__, __LINE__, err) != 0)
return 1;
err = cudaMalloc((void **)&d_D, sizeof(double) * size_buffer);
if (checkError(__func__, __LINE__, err) != 0)
return 1;
//
// Copy data to device
//
err = cudaMemcpy(d_plink, plink, long(n_bytes_per_snp) * long(snps), cudaMemcpyHostToDevice);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = cudaMemcpy(d_plink_transposed, plink_transposed, long(n_bytes_per_indiv) * long(indiv),
cudaMemcpyHostToDevice);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = cudaMemcpy(d_f, f, sizeof(double) * long(snps), cudaMemcpyHostToDevice);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
// Fill d_unit with 1.0s
thrust::device_ptr<double> d_unit_thrust(d_unit);
thrust::fill(d_unit_thrust, d_unit_thrust + indiv, 1.0);
err = cudaGetLastError();
if (checkError(__func__, __LINE__, err) != 0)
return (1);
//
// Initialize GPU_gemm_storage object
//
struct GPU_gemm_storage *GPU_storage_obj =
(struct GPU_gemm_storage *)malloc(sizeof(struct GPU_gemm_storage));
GPU_storage_obj->d_plink = d_plink;
GPU_storage_obj->d_plink_transposed = d_plink_transposed;
GPU_storage_obj->d_f = d_f;
GPU_storage_obj->d_unit = d_unit;
GPU_storage_obj->d_B = d_B;
GPU_storage_obj->d_C = d_C;
GPU_storage_obj->d_D = d_D;
GPU_storage_obj->size_buffer = size_buffer;
GPU_storage_obj->snps = snps;
GPU_storage_obj->indiv = indiv;
GPU_storage_obj->device = device;
debug_info("Pointer d_plink %d, d_plink_transposed %d", d_plink, d_plink_transposed);
// Set pointer to initialized object
*GPU_obj = (void *)GPU_storage_obj;
return 0;
}
//
// freegpu function
//
int freegpu(void **GPU_obj){
cudaError_t err;
if(checkCuda() != 0){
return 1;
}
if(*GPU_obj == NULL){
return 1;
}
  // Free device memory and dereference the storage object
struct GPU_gemm_storage *GPU_storage_obj = (struct GPU_gemm_storage *) (*GPU_obj);
err = cudaFree(GPU_storage_obj->d_plink);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = cudaFree(GPU_storage_obj->d_plink_transposed);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = cudaFree(GPU_storage_obj->d_f);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = cudaFree(GPU_storage_obj->d_unit);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = cudaFree(GPU_storage_obj->d_B);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = cudaFree(GPU_storage_obj->d_C);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
err = cudaFree(GPU_storage_obj->d_D);
if (checkError(__func__, __LINE__, err) != 0)
return (1);
free(GPU_storage_obj);
GPU_obj = NULL;
return 0;
}
//
// dgemm_compressed_gpu function
//
int dgemm_compressed_gpu(bool transA, void *GPU_obj, int n, double *B, int ldb,
int centered, int normalized, double *C, int ldc) {
/*
Performs one of the operations
C <- alpha * op(M - 2 * 1_indiv f^T) * op(B) + beta * C
on the GPU, where
op(X) = X or op(X) = X^T,
alpha and beta are scalars,
M is a compressed genotype matrix of dimensions indiv times snps stored in row-major,
f is a vector of allele frequencies of length snps,
op(B) is a matrix of double precision and number of rows equal to number of columns of op(M)
C is a matrix of double precision
Parameters:
transa: Specifies the form of op(M) used in the matrix multiplication. If
transa = true, then op(M) = M^T. If transa = false, then op(M) =
M.
m: Specifies the number of rows of op(M - 2* 1_k*f^T) and C
n: Specifies the number of columns of op(B) and C
    k: Specifies the number of columns of op(M - 2* 1_k*f^T) and rows of op(B)
    k1: Specifies the number of columns of op(M) in compressed format
    alpha: Not supported currently, only alpha=1 is allowed. Specifies the scalar alpha
    Storage: Struct which stores device pointers to both M and its transposed
        as well as a device pointer to the vector of allele frequencies f
lda: Not supported currently
B: Specifies the matrix B
ldb: Not supported currently
beta: Specifies the scalar beta. When beta is
equal to zero, then C need not be set on input.
C: Specifies the matrix C
ldc: Not supported currently
  A further boost in performance can be achieved if the whole PCG is transferred
to the GPU to avoid data movement.
*/
//
// Initialization
//
// Initialize helper variables
cudaError_t err;
cublasStatus_t cublas_status;
cublasHandle_t cublas_handle;
struct GPU_gemm_storage *GPU_storage_obj = (struct GPU_gemm_storage *) GPU_obj;
// Initialize device pointer for M
// cutlass function assumes row major for M and PLINK bed uses SNP major, hence
// pointer plink is needed for transA = 't' and plink_transposed if transA =
// 'N'
uint8_t *d_M = transA ? GPU_storage_obj->d_plink : GPU_storage_obj->d_plink_transposed;
cutlass::u4f64_t *d_B = reinterpret_cast<cutlass::u4f64_t *>(GPU_storage_obj->d_B);
double *d_f = GPU_storage_obj->d_f,
*d_unit = GPU_storage_obj->d_unit;
double *d_C = GPU_storage_obj->d_C;
double *d_D = GPU_storage_obj->d_D;
long m = transA ? GPU_storage_obj->snps : GPU_storage_obj->indiv;
long k = transA ? GPU_storage_obj->indiv : GPU_storage_obj->snps;
long size_buffer = GPU_storage_obj->size_buffer;
long k1 = (k - 1) / 4 + 1;
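  // k1 is the packed inner dimension: each byte of the compressed SNP matrix
  // holds 4 two-bit genotypes, so op(M) has k1 = ceil(k / 4) bytes per row.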
debug_info("\tEntering GPU multiplication\n");
debug_info("Pointer: d_M %d, Dimensions: m %ld, k %ld, k1 %ld, n %ld", d_M, m, k, k1, n);
const double alpha = 1.0,
alpha_n2 = -2.0,
beta = 0.0;
double *d_workspace = NULL;
// Check CUDA installation
if(checkCuda() != 0){
return 1;
}
// Create cuBLAS handle
cublas_status = cublasCreate(&cublas_handle);
if (checkError(__func__, __LINE__, cublas_status) != 0)
return 1;
// Zero-fill matrices C and B to avoid spurious results
err = cudaMemset(d_B, 0, sizeof(double) * size_buffer);
if (checkError(__func__, __LINE__, err) != 0)
return 1;
  err = cudaMemset(d_C, 0, sizeof(double) * size_buffer);
  if (checkError(__func__, __LINE__, err) != 0)
return 1;
err = cudaMemset(d_D, 0, sizeof(double) * size_buffer);
if (checkError(__func__, __LINE__, err) != 0)
return 1;
// Copy data to device
debug_info("Memcpy dstpitch %d, srcpitch %d, width %d, height %d",
sizeof(double) * 4 * ((k - 1) / 4 + 1), sizeof(double) * k,
sizeof(double) * k, sizeof(double) * n);
err = cudaMemcpy2D(d_B, sizeof(double) * 4 * ((k - 1) / 4 + 1), B,
sizeof(double) * k, sizeof(double) * k, n,
cudaMemcpyHostToDevice);
if (checkError(__func__, __LINE__, err) != 0)
return 1;
//
// SNP matrix multiplication
// The following section multiplies the SNP matrix with a vector of doubles in
// cutlass
//
// Create a problem size struct for matrix multiplication
cutlass::gemm::GemmCoord problem_size_packed(m, n, k1);
// Declare Gemm problem
using CutlassGemm = typename cutlass::gemm::device::Gemm<
uint8_t, cutlass::layout::RowMajor, cutlass::u4f64_t,
cutlass::layout::ColumnMajor, double, cutlass::layout::RowMajor, double,
cutlass::arch::OpClassSimt,
      cutlass::arch::Sm61, // TODO: Check template hierarchy if this is required
cutlass::gemm::GemmShape<32, 32, 16>, // Do not change
cutlass::gemm::GemmShape<16, 16, 16>, // Might be tuned but must be
// smaller than ThreadblockShape
cutlass::gemm::GemmShape<1, 1, 1>, // Do not change
cutlass::epilogue::thread::LinearCombination<double, 1, double, double>,
typename cutlass::gemm::threadblock::GemmIdentityThreadblockSwizzle<>, 2,
1, 1, false,
cutlass::arch::OpMultiplyAdd // Operator
>;
debug_info("Size: %ld", k1 * n);
// Define CUTLASS GEMM arguments
typename CutlassGemm::Arguments arguments{
problem_size_packed, // problem size of matrix multiplication
{d_M, k1}, // reference to matrix M on device
{d_B, k1}, // reference to matrix B on device
{d_C, n}, // reference to matrix C on device
{d_C, n}, // reference to matrix D on device
{alpha, beta} // tuple of alpha and beta
};
// Calculate CUTLASS GEMM workspace size
size_t workspace_size = CutlassGemm::get_workspace_size(arguments);
// Allocate workspace memory
cudaMalloc((void **)&d_workspace, max(workspace_size, sizeof(double) * n));
// Instantiate CUTLASS kernel depending on templates
CutlassGemm gemm_op;
// Test if problem can be implemented
cutlass::Status status = gemm_op.can_implement(arguments);
if (status != cutlass::Status::kSuccess)
printf("Can't implement\n");
// Initialize CUTLASS kernel with arguments and workspace pointer
status = gemm_op.initialize(arguments, d_workspace);
if (status != cutlass::Status::kSuccess)
printf("Error in initialization\n");
// Launch initialized CUTLASS kernel
status = gemm_op(); // Actual gemm op
cudaDeviceSynchronize();
if (status != cutlass::Status::kSuccess)
printf("Operation error %d\n", (int) status);
// Catch all accumulated errors from previous cuda launches
err = cudaGetLastError();
if (checkError(__func__, __LINE__, err) != 0)
return (1);
//
// Transpose
// Switch to column major
//
cublas_status = cublasDgeam(cublas_handle, //
CUBLAS_OP_T, // C needs to be transposed
CUBLAS_OP_N, // No-op on B
m, // Number of rows of C after transposing
n, // Number of columns of C after transposing
&alpha, // alpha
d_C, // matrix A
n, // lda
&beta, // beta
d_D, // matrix B
m, // ldb
d_D, // matrix C
m); // ldb
if (checkError(__func__, __LINE__, cublas_status) != 0)
return (1);
//
// Genotype centering
  // The following section performs genotype centering by subtracting op(2 *
// 1_indiv * f^T ) B from C
//
// dgemv performs C <- alpha op(A) x + beta y
// We calculate f^T B if transa = true or 1_k^T B if transa = false
// cuBLAS only supports op(A) x, so we calculate B^T f (B^T 1_k resp.), which
// returns the same as the result is a vector
// B is of dimension (snps, n) if
// transa = 'N' and of dimension (indiv,n) if transa = true cuBLAS assumes
// column-major and d_B is stored in column-major, hence trans = CUBLAS_OP_T
switch(centered){
case 0: break;
case 1:
{
debug_info("Centering: B^f");
cublas_status = cublasDgemv(cublas_handle, // handle
CUBLAS_OP_T, // trans
k, // number of rows of A
n, // number of cols of A
&alpha_n2, // alpha
reinterpret_cast<double *>(d_B), // matrix A
((k - 1) / 4 + 1) * 4, // lda
transA ? d_unit : d_f, // vector x
1, // incx
&beta, // beta
d_workspace, // vector y
1); // incy
if (checkError(__func__, __LINE__, cublas_status) != 0)
return (1);
debug_info("Centering: C");
    // Now every column i is scaled with alpha_i 1_k if transa = true or alpha_i
// f if transa = false, where alpha_i = B_i^T f (alpha_i = B_i^T 1_k resp)
cublas_status =
cublasSetPointerMode(cublas_handle, CUBLAS_POINTER_MODE_DEVICE);
if (checkError(__func__, __LINE__, cublas_status) != 0)
return (1);
for (int i = 0; i < n; i++) {
cublas_status = cublasDaxpy(cublas_handle, // handle
m, // number of rows
d_workspace + i, // alpha
transA ? d_f : d_unit, // x
1, // incx
d_D + i * m, // y
1); // incy
if (checkError(__func__, __LINE__, cublas_status) != 0)
return (1);
}
break;
}
default: checkError(__func__, __LINE__, cudaErrorInvalidValue); return 1;
}
//
// Wrap-up
//
// Copy results back to the host
debug_info("Copy back");
err = cudaMemcpy(C, d_D, sizeof(double) * m * n, cudaMemcpyDeviceToHost);
if (checkError(__func__, __LINE__, err) != 0)
return 1;
debug_info("Free pointers");
cudaDeviceSynchronize();
cublas_status = cublasDestroy(cublas_handle);
if (checkError(__func__, __LINE__, cublas_status) != 0)
return (1);
cudaFree(d_workspace);
// debug_info("C: ");
// for(int i = 0; i < 10; i++){
// debug_info("%f ", C[i]);
// }
debug_info("Return");
return 0;
}
|
2e9da48915b9003203d2a3831b0b97695ecb34b8.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
// BLK_K gets defined in magmablas_sgemm_reduce,
// because it depends on the CUDA architecture at runtime.
///////////////////////////////////////////////////////////////////////////////////////////////////
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce2( /*int n,*/ int j, int k, int i, float x[][ BLK_N+1 ][ n+1 ] )
{
__syncthreads();
/*
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[j][k][i] += x[j][k][i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[j][k][i] += x[j][k][i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[j][k][i] += x[j][k][i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[j][k][i] += x[j][k][i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[j][k][i] += x[j][k][i+ 64]; } __syncthreads(); }
*/
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[j][k][i] += x[j][k][i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[j][k][i] += x[j][k][i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[j][k][i] += x[j][k][i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[j][k][i] += x[j][k][i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[j][k][i] += x[j][k][i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[j][k][i] += x[j][k][i+ 1]; } __syncthreads(); }
}
// end sum_reduce
//==============================================================================
// BLK_K size is templated, as it depends on CUDA architecture at runtime.
// Hmm... how to compile for both CUDA arch 1.x and 2.x?
template< int BLK_K >
__global__
void sgemm_reduce_kernel(
int m, int n, int k,
float alpha,
const float* __restrict__ d_A, int lda,
const float* __restrict__ d_B, int ldb,
float beta,
float * __restrict__ d_C, int ldc)
{
#if (__CUDA_ARCH__ >= 200)
const int i = threadIdx.x;
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n){
const float *dA = d_A + (blockIdx.x*BLK_M + threadIdx.y) * lda;
const float *dB = d_B + (blockIdx.y*BLK_N + threadIdx.z) * ldb;
float *dC = d_C + blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
__shared__ float sum[BLK_M][BLK_N+1][BLK_K+1];
float lsum;
/* w := v' * C */
lsum = MAGMA_S_ZERO;
for( int j = i; j < k; j += BLK_K )
lsum += MAGMA_S_CNJG( dA[j] )* dB[j];
sum[threadIdx.y][threadIdx.z][i] = lsum;
sum_reduce2< BLK_K >( threadIdx.y, threadIdx.z, i, sum );
/* C := C - v * w */
__syncthreads();
if (threadIdx.x == 0) {
if (MAGMA_S_EQUAL(beta, MAGMA_S_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[threadIdx.y][threadIdx.z][0];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[threadIdx.y][threadIdx.z][0];
}
}
#endif
}
//==============================================================================
extern "C" void
magmablas_sgemm_reduce(
magma_int_t m, magma_int_t n, magma_int_t k,
float alpha,
const float *d_A, magma_int_t lda,
const float *d_B, magma_int_t ldb,
float beta,
float *d_C, magma_int_t ldc )
{
/* -- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
Purpose
=======
SGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A^T*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
===================================================================== */
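    // Launch geometry: each thread block computes a BLK_M x BLK_N tile of C,
    // and BLK_K threads cooperate on every element of that tile by striding
    // over k and then reducing in shared memory (sum_reduce2). With
    // BLK_M = BLK_N = 16 this gives BLK_K = 512/256 = 2 threads per element on
    // arch 1.x and 1024/256 = 4 on arch 2.x.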
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x -- maximum 512 threads
const int NUM_THREADS = 512;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N));
dim3 blocks( (m-1)/BLK_M + 1, (n-1)/BLK_N + 1 );
dim3 threads( BLK_K, BLK_M, BLK_N );
hipLaunchKernelGGL(( sgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, magma_stream ,
m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
}
else {
// --------------------
// call CUDA ARCH 2.x -- maximum 1024 threads
const int NUM_THREADS = 1024;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N));
dim3 blocks( (m-1)/BLK_M + 1, (n-1)/BLK_N + 1 );
dim3 threads( BLK_K, BLK_M, BLK_N );
hipLaunchKernelGGL(( sgemm_reduce_kernel<BLK_K>) , dim3(blocks), dim3(threads), 0, magma_stream ,
m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
}
}
//==============================================================================
| 2e9da48915b9003203d2a3831b0b97695ecb34b8.cu | /*
-- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
@generated s Tue Dec 17 13:18:45 2013
*/
#include "common_magma.h"
///////////////////////////////////////////////////////////////////////////////////////////////////
// size of work for a thread block
#define BLK_M 16
#define BLK_N 16
// BLK_K gets defined in magmablas_sgemm_reduce,
// because it depends on the CUDA architecture at runtime.
///////////////////////////////////////////////////////////////////////////////////////////////////
// ----------------------------------------
// Does sum reduction of array x, leaving total in x[0].
// Contents of x are destroyed in the process.
// With k threads, can reduce array up to 2*k in size.
// Assumes number of threads <= 1024 (which is max number of threads up to CUDA capability 3.0)
// Having n as template parameter allows compiler to evaluate some conditions at compile time.
template< int n >
__device__ void sum_reduce2( /*int n,*/ int j, int k, int i, float x[][ BLK_N+1 ][ n+1 ] )
{
__syncthreads();
/*
if ( n > 1024 ) { if ( i < 1024 && i + 1024 < n ) { x[j][k][i] += x[j][k][i+1024]; } __syncthreads(); }
if ( n > 512 ) { if ( i < 512 && i + 512 < n ) { x[j][k][i] += x[j][k][i+ 512]; } __syncthreads(); }
if ( n > 256 ) { if ( i < 256 && i + 256 < n ) { x[j][k][i] += x[j][k][i+ 256]; } __syncthreads(); }
if ( n > 128 ) { if ( i < 128 && i + 128 < n ) { x[j][k][i] += x[j][k][i+ 128]; } __syncthreads(); }
if ( n > 64 ) { if ( i < 64 && i + 64 < n ) { x[j][k][i] += x[j][k][i+ 64]; } __syncthreads(); }
*/
if ( n > 32 ) { if ( i < 32 && i + 32 < n ) { x[j][k][i] += x[j][k][i+ 32]; } __syncthreads(); }
// probably don't need __syncthreads for < 16 threads
// because of implicit warp level synchronization.
if ( n > 16 ) { if ( i < 16 && i + 16 < n ) { x[j][k][i] += x[j][k][i+ 16]; } __syncthreads(); }
if ( n > 8 ) { if ( i < 8 && i + 8 < n ) { x[j][k][i] += x[j][k][i+ 8]; } __syncthreads(); }
if ( n > 4 ) { if ( i < 4 && i + 4 < n ) { x[j][k][i] += x[j][k][i+ 4]; } __syncthreads(); }
if ( n > 2 ) { if ( i < 2 && i + 2 < n ) { x[j][k][i] += x[j][k][i+ 2]; } __syncthreads(); }
if ( n > 1 ) { if ( i < 1 && i + 1 < n ) { x[j][k][i] += x[j][k][i+ 1]; } __syncthreads(); }
}
// end sum_reduce
//==============================================================================
// BLK_K size is templated, as it depends on CUDA architecture at runtime.
// Hmm... how to compile for both CUDA arch 1.x and 2.x?
template< int BLK_K >
__global__
void sgemm_reduce_kernel(
int m, int n, int k,
float alpha,
const float* __restrict__ d_A, int lda,
const float* __restrict__ d_B, int ldb,
float beta,
float * __restrict__ d_C, int ldc)
{
#if (__CUDA_ARCH__ >= 200)
const int i = threadIdx.x;
if (blockIdx.x*BLK_M + threadIdx.y < m && blockIdx.y*BLK_N + threadIdx.z < n){
const float *dA = d_A + (blockIdx.x*BLK_M + threadIdx.y) * lda;
const float *dB = d_B + (blockIdx.y*BLK_N + threadIdx.z) * ldb;
float *dC = d_C + blockIdx.x*BLK_M + blockIdx.y*BLK_N * ldc;
__shared__ float sum[BLK_M][BLK_N+1][BLK_K+1];
float lsum;
/* w := v' * C */
lsum = MAGMA_S_ZERO;
for( int j = i; j < k; j += BLK_K )
lsum += MAGMA_S_CNJG( dA[j] )* dB[j];
sum[threadIdx.y][threadIdx.z][i] = lsum;
sum_reduce2< BLK_K >( threadIdx.y, threadIdx.z, i, sum );
/* C := C - v * w */
__syncthreads();
if (threadIdx.x == 0) {
if (MAGMA_S_EQUAL(beta, MAGMA_S_ZERO))
dC[threadIdx.y + threadIdx.z*ldc] = alpha*sum[threadIdx.y][threadIdx.z][0];
else
dC[threadIdx.y + threadIdx.z*ldc] = beta* dC[threadIdx.y + threadIdx.z*ldc] +
alpha*sum[threadIdx.y][threadIdx.z][0];
}
}
#endif
}
//==============================================================================
extern "C" void
magmablas_sgemm_reduce(
magma_int_t m, magma_int_t n, magma_int_t k,
float alpha,
const float *d_A, magma_int_t lda,
const float *d_B, magma_int_t ldb,
float beta,
float *d_C, magma_int_t ldc )
{
/* -- MAGMA (version 1.4.1) --
Univ. of Tennessee, Knoxville
Univ. of California, Berkeley
Univ. of Colorado, Denver
December 2013
Purpose
=======
SGEMM_REDUCE performs one of the matrix-matrix operations
C := alpha*A^T*B + beta*C,
where alpha and beta are scalars, and A, B and C are matrices, with A
a k-by-m matrix, B a k-by-n matrix, and C an m-by-n matrix.
This routine is tuned for m, n << k. Typically, m and n are expected
to be less than 128.
===================================================================== */
magma_int_t arch = magma_getdevice_arch();
if ( arch < 200 ) {
// --------------------
// call CUDA ARCH 1.x -- maximum 512 threads
const int NUM_THREADS = 512;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N));
dim3 blocks( (m-1)/BLK_M + 1, (n-1)/BLK_N + 1 );
dim3 threads( BLK_K, BLK_M, BLK_N );
sgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, magma_stream >>>
( m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
}
else {
// --------------------
// call CUDA ARCH 2.x -- maximum 1024 threads
const int NUM_THREADS = 1024;
const int BLK_K = (NUM_THREADS / (BLK_M * BLK_N));
dim3 blocks( (m-1)/BLK_M + 1, (n-1)/BLK_N + 1 );
dim3 threads( BLK_K, BLK_M, BLK_N );
sgemm_reduce_kernel<BLK_K> <<< blocks, threads, 0, magma_stream >>>
( m, n, k, alpha, d_A, lda, d_B, ldb, beta, d_C, ldc );
}
}
//==============================================================================
|
c98291edbfb69897a695b0d98cc42e7568d38635.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdlib.h>
#include <stdio.h>
#define a(i,l) A[(i)*k + (l)]
#define b(l,j) B[(l)*n + (j)]
#define c(i,j) C[(i)*n + (j)]
#define BLOCK_SIZE 16
#define num_el 4
// Declarations
extern "C" {
void matmult_gpu1(int m, int n, int k,double *h_A,double *h_B,double *h_C);
void matmult_gpu2(int m, int n, int k,double *h_A,double *h_B,double *h_C);
void matmult_gpu3(int m, int n, int k,double *h_A,double *h_B,double *h_C);
void matmult_gpu4(int m, int n, int k,double *h_A,double *h_B,double *h_C);
void matmult_gpu5(int m, int n, int k,double *h_A,double *h_B,double *h_C);
}
__global__ void matmult1(int m, int n, int k,double *A,double *B,double *C);
__global__ void matmult2(int m, int n, int k,double *A,double *B,double *C);
__global__ void matmult3(int m, int n, int k,double *A,double *B,double *C);
__global__ void matmult4(int m, int n, int k,double *A,double *B,double *C);
__global__ void matmult5(int m, int n, int k,double *A,double *B,double *C);
void matmult_gpu1(int m, int n, int k,double *h_A,double *h_B,double *h_C) {
double *d_A, *d_B, *d_C;
int size_A = m * k * sizeof(double);
int size_B = k * n * sizeof(double);
int size_C = m * n * sizeof(double);
hipMalloc((void **)&d_A, size_A);
hipMalloc((void **)&d_B, size_B);
hipMalloc((void **)&d_C, size_C);
dim3 dimBlock(1, 1, 1); // Num threads
dim3 dimGrid(1, 1, 1); // Num blocks
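    // Baseline version: a single thread in a single block computes the whole
    // m x n result sequentially on the device.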
hipMemcpy(d_A, h_A, size_A, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size_B, hipMemcpyHostToDevice);
hipMemset(d_C, 0, size_C);
hipLaunchKernelGGL(( matmult1), dim3(dimGrid),dim3(dimBlock), 0, 0, m, n, k, d_A, d_B, d_C);
hipDeviceSynchronize();
hipMemcpy(h_C, d_C, size_C, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
__global__ void matmult1(int m, int n, int k,double *A,double *B,double *C) {
int i,j,l;
for (i = 0; i<m; i++) {
for (l = 0; l<k; l++) {
for (j = 0; j<n; j++) {
c(i,j) = c(i,j) + a(i,l) * b(l,j);
}
}
}
}
void matmult_gpu2(int m, int n, int k,double *h_A,double *h_B,double *h_C) {
double *d_A, *d_B, *d_C;
int size_A = m * k * sizeof(double);
int size_B = k * n * sizeof(double);
int size_C = m * n * sizeof(double);
hipMalloc((void **)&d_A, size_A);
hipMalloc((void **)&d_B, size_B);
hipMalloc((void **)&d_C, size_C);
dim3 dimBlock(16, 16, 1); // Num threads
dim3 dimGrid(ceil((double)n/dimBlock.x), ceil((double)m/dimBlock.y), 1); // Num blocks
//printf("x: %d, y: %d, z: %d\n", dimGrid.x, dimGrid.y, dimGrid.z);
hipMemcpy(d_A, h_A, size_A, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size_B, hipMemcpyHostToDevice);
hipMemset(d_C, 0, size_C);
hipLaunchKernelGGL(( matmult2), dim3(dimGrid),dim3(dimBlock), 0, 0, m, n, k, d_A, d_B, d_C);
hipDeviceSynchronize();
hipMemcpy(h_C, d_C, size_C, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
__global__ void matmult2(int m, int n, int k,double *A,double *B,double *C) {
int i,j,l;
j = blockIdx.x * blockDim.x + threadIdx.x;
i = blockIdx.y * blockDim.y + threadIdx.y;
double C_reg = 0.0;
if (i < m && j < n) {
for (l = 0; l<k; l++) {
C_reg = C_reg + a(i,l) * b(l,j);
}
c(i,j) = C_reg;
}
}
void matmult_gpu3(int m, int n, int k,double *h_A,double *h_B,double *h_C) {
double *d_A, *d_B, *d_C;
int size_A = m * k * sizeof(double);
int size_B = k * n * sizeof(double);
int size_C = m * n * sizeof(double);
hipMalloc((void **)&d_A, size_A);
hipMalloc((void **)&d_B, size_B);
hipMalloc((void **)&d_C, size_C);
dim3 dimBlock(16, 16, 1); // Num threads
dim3 dimGrid((ceil((double)n/dimBlock.x)), ceil(((double)m/dimBlock.y) / 2), 1); // Num blocks
//printf("x: %d, y: %d, z: %d\n", dimGrid.x, dimGrid.y, dimGrid.z);
hipMemcpy(d_A, h_A, size_A, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size_B, hipMemcpyHostToDevice);
hipMemset(d_C, 0, size_C);
hipLaunchKernelGGL(( matmult3), dim3(dimGrid),dim3(dimBlock), 0, 0, m, n, k, d_A, d_B, d_C);
hipDeviceSynchronize();
hipMemcpy(h_C, d_C, size_C, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
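// matmult3: 2-way register blocking over rows -- each thread accumulates two
// elements of C (rows i and i+1 of the same column), so every loaded b(l,j) is
// reused twice; the else-if branch handles an odd trailing row.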
__global__ void matmult3(int m, int n, int k,double *A,double *B,double *C) {
int i,j,l;
j = blockIdx.x * blockDim.x + threadIdx.x;
i = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
double C_reg[2] = {0.0};
if (i < m-1 && j < n) {
for (l = 0; l<k; l++) {
C_reg[0] = C_reg[0] + a(i,l) * b(l,j);
C_reg[1] = C_reg[1] + a(i+1,l) * b(l,j);
}
//Copy back to global memory
c(i,j) = C_reg[0];
c(i+1,j) = C_reg[1];
} else if (i == m-1 && j < n) {
for (l = 0; l<k; l++) {
C_reg[0] = C_reg[0] + a(i,l) * b(l,j);
}
//Copy back to global memory
c(i,j) = C_reg[0];
}
/*
j = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
i = blockIdx.y * blockDim.y + threadIdx.y;
if (i < m && j < n-1) {
for (l = 0; l<k; l++) {
c(i,j) = c(i,j) + a(i,l) * b(l,j);
c(i,j+1) = c(i,j+1) + a(i,l) * b(l,j+1);
}
} else if (i < m && j == n-1) {
for (l = 0; l<k; l++) {
c(i,j) = c(i,j) + a(i,l) * b(l,j);
}
}
*/
}
void matmult_gpu4(int m, int n, int k,double *h_A,double *h_B,double *h_C) {
double *d_A, *d_B, *d_C;
int size_A = m * k * sizeof(double);
int size_B = k * n * sizeof(double);
int size_C = m * n * sizeof(double);
hipMalloc((void **)&d_A, size_A);
hipMalloc((void **)&d_B, size_B);
hipMalloc((void **)&d_C, size_C);
hipMemcpy(d_A, h_A, size_A, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size_B, hipMemcpyHostToDevice);
dim3 dimBlock(16, 16, 1); // Num threads
dim3 dimGrid((ceil((double)n/dimBlock.x)), ceil(((double)m/dimBlock.y) / num_el), 1); // Num blocks
hipMemset(d_C, 0, size_C);
hipLaunchKernelGGL(( matmult4), dim3(dimGrid),dim3(dimBlock), 0, 0, m, n, k, d_A, d_B, d_C);
hipDeviceSynchronize();
hipMemcpy(h_C, d_C, size_C, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
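// matmult4: same idea as matmult3 but with num_el = 4 rows of C per thread;
// the else branch loops over the remaining rows when m is not a multiple of 4.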
__global__ void matmult4(int m, int n, int k,double *A,double *B,double *C) {
int i,j,l,s;
j = blockIdx.x * blockDim.x + threadIdx.x;
i = (blockIdx.y * blockDim.y + threadIdx.y) * num_el;
double C_reg[num_el] = {0.0};
if (i < m-num_el && j < n) {
for (l = 0; l<k; l++) {
C_reg[0] = C_reg[0] + a(i,l) * b(l,j);
C_reg[1] = C_reg[1] + a(i+1,l) * b(l,j);
C_reg[2] = C_reg[2] + a(i+2,l) * b(l,j);
C_reg[3] = C_reg[3] + a(i+3,l) * b(l,j);
}
c(i,j) = C_reg[0];
c(i+1,j) = C_reg[1];
c(i+2,j) = C_reg[2];
c(i+3,j) = C_reg[3];
} else if (i >= m-num_el && j < n) {
for (s = i; s<m; s++) {
for (l = 0; l<k; l++) {
C_reg[s-i] = C_reg[s-i] + a(s,l) * b(l,j);
}
c(s,j) = C_reg[s-i];
}
}
}
void matmult_gpu5(int m, int n, int k,double *h_A,double *h_B,double *h_C) {
double *d_A, *d_B, *d_C;
int size_A = m * k * sizeof(double);
int size_B = k * n * sizeof(double);
int size_C = m * n * sizeof(double);
hipMalloc((void **)&d_A, size_A);
hipMalloc((void **)&d_B, size_B);
hipMalloc((void **)&d_C, size_C);
hipMemcpy(d_A, h_A, size_A, hipMemcpyHostToDevice);
hipMemcpy(d_B, h_B, size_B, hipMemcpyHostToDevice);
dim3 dimBlock(16, 16, 1); // Num threads
dim3 dimGrid((ceil((double)n/dimBlock.x)), ceil(((double)m/dimBlock.y)), 1); // Num blocks
hipMemset(d_C, 0, size_C);
hipLaunchKernelGGL(( matmult5), dim3(dimGrid),dim3(dimBlock), 0, 0, m, n, k, d_A, d_B, d_C);
hipDeviceSynchronize();
hipMemcpy(h_C, d_C, size_C, hipMemcpyDeviceToHost);
hipFree(d_A);
hipFree(d_B);
hipFree(d_C);
}
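// matmult5: shared-memory tiling, loading one BLOCK_SIZE x BLOCK_SIZE tile of
// A and of B per iteration of the outer loop. Note that, as written, the
// kernel has no boundary guards, so it assumes m, n and k are all multiples of
// BLOCK_SIZE.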
__global__ void matmult5(int m, int n, int k,double *A,double *B,double *C) {
int i,j,l;
double Cvalue = 0;
double * Asub, *Bsub, *Csub;
i = threadIdx.y;
j = threadIdx.x;
int k_blocked = (k/BLOCK_SIZE);
//printf("k_blocked %d\n", k_blocked);
//Get Csub matrix
//k is here the A.stride
Csub = &C[n*BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x];
for (l = 0; l<k_blocked; l++) {
//Shared memory to store sub-matrices of A and B
__shared__ double as[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double bs[BLOCK_SIZE][BLOCK_SIZE];
Asub = &A[k*BLOCK_SIZE * blockIdx.y +
BLOCK_SIZE * l];
Bsub = &B[n*BLOCK_SIZE * l +
BLOCK_SIZE * blockIdx.x];
as[threadIdx.y][threadIdx.x] = Asub[k*i + j];
bs[threadIdx.y][threadIdx.x] = Bsub[n*i + j];
__syncthreads();
//Multiply sub matrices
for(int e = 0; e < BLOCK_SIZE; ++e){
Cvalue += as[i][e] * bs[e][j];
}
__syncthreads();
}
  // Write this thread's accumulated value back to its element of Csub in
  // global memory (Csub[i * n + j], i.e. row-major with leading dimension n)
Csub[i*n+j] = Cvalue;
}
| c98291edbfb69897a695b0d98cc42e7568d38635.cu | #include <stdlib.h>
#include <stdio.h>
#define a(i,l) A[(i)*k + (l)]
#define b(l,j) B[(l)*n + (j)]
#define c(i,j) C[(i)*n + (j)]
#define BLOCK_SIZE 16
#define num_el 4
// Declarations
extern "C" {
void matmult_gpu1(int m, int n, int k,double *h_A,double *h_B,double *h_C);
void matmult_gpu2(int m, int n, int k,double *h_A,double *h_B,double *h_C);
void matmult_gpu3(int m, int n, int k,double *h_A,double *h_B,double *h_C);
void matmult_gpu4(int m, int n, int k,double *h_A,double *h_B,double *h_C);
void matmult_gpu5(int m, int n, int k,double *h_A,double *h_B,double *h_C);
}
__global__ void matmult1(int m, int n, int k,double *A,double *B,double *C);
__global__ void matmult2(int m, int n, int k,double *A,double *B,double *C);
__global__ void matmult3(int m, int n, int k,double *A,double *B,double *C);
__global__ void matmult4(int m, int n, int k,double *A,double *B,double *C);
__global__ void matmult5(int m, int n, int k,double *A,double *B,double *C);
void matmult_gpu1(int m, int n, int k,double *h_A,double *h_B,double *h_C) {
double *d_A, *d_B, *d_C;
int size_A = m * k * sizeof(double);
int size_B = k * n * sizeof(double);
int size_C = m * n * sizeof(double);
cudaMalloc((void **)&d_A, size_A);
cudaMalloc((void **)&d_B, size_B);
cudaMalloc((void **)&d_C, size_C);
dim3 dimBlock(1, 1, 1); // Num threads
dim3 dimGrid(1, 1, 1); // Num blocks
cudaMemcpy(d_A, h_A, size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size_B, cudaMemcpyHostToDevice);
cudaMemset(d_C, 0, size_C);
matmult1<<<dimGrid,dimBlock>>>(m, n, k, d_A, d_B, d_C);
cudaDeviceSynchronize();
cudaMemcpy(h_C, d_C, size_C, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
__global__ void matmult1(int m, int n, int k,double *A,double *B,double *C) {
int i,j,l;
for (i = 0; i<m; i++) {
for (l = 0; l<k; l++) {
for (j = 0; j<n; j++) {
c(i,j) = c(i,j) + a(i,l) * b(l,j);
}
}
}
}
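// Editorial note (not part of the original source): matmult_gpu1 launches matmult1 with a
// 1x1 grid of 1x1 blocks, so the whole triple loop runs serially on a single GPU thread.
// It is best read as a correctness baseline for the parallel variants below rather than as
// a performance kernel.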
void matmult_gpu2(int m, int n, int k,double *h_A,double *h_B,double *h_C) {
double *d_A, *d_B, *d_C;
int size_A = m * k * sizeof(double);
int size_B = k * n * sizeof(double);
int size_C = m * n * sizeof(double);
cudaMalloc((void **)&d_A, size_A);
cudaMalloc((void **)&d_B, size_B);
cudaMalloc((void **)&d_C, size_C);
dim3 dimBlock(16, 16, 1); // Num threads
dim3 dimGrid(ceil((double)n/dimBlock.x), ceil((double)m/dimBlock.y), 1); // Num blocks
//printf("x: %d, y: %d, z: %d\n", dimGrid.x, dimGrid.y, dimGrid.z);
cudaMemcpy(d_A, h_A, size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size_B, cudaMemcpyHostToDevice);
cudaMemset(d_C, 0, size_C);
matmult2<<<dimGrid,dimBlock>>>(m, n, k, d_A, d_B, d_C);
cudaDeviceSynchronize();
cudaMemcpy(h_C, d_C, size_C, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
__global__ void matmult2(int m, int n, int k,double *A,double *B,double *C) {
int i,j,l;
j = blockIdx.x * blockDim.x + threadIdx.x;
i = blockIdx.y * blockDim.y + threadIdx.y;
double C_reg = 0.0;
if (i < m && j < n) {
for (l = 0; l<k; l++) {
C_reg = C_reg + a(i,l) * b(l,j);
}
c(i,j) = C_reg;
}
}
void matmult_gpu3(int m, int n, int k,double *h_A,double *h_B,double *h_C) {
double *d_A, *d_B, *d_C;
int size_A = m * k * sizeof(double);
int size_B = k * n * sizeof(double);
int size_C = m * n * sizeof(double);
cudaMalloc((void **)&d_A, size_A);
cudaMalloc((void **)&d_B, size_B);
cudaMalloc((void **)&d_C, size_C);
dim3 dimBlock(16, 16, 1); // Num threads
dim3 dimGrid((ceil((double)n/dimBlock.x)), ceil(((double)m/dimBlock.y) / 2), 1); // Num blocks
//printf("x: %d, y: %d, z: %d\n", dimGrid.x, dimGrid.y, dimGrid.z);
cudaMemcpy(d_A, h_A, size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size_B, cudaMemcpyHostToDevice);
cudaMemset(d_C, 0, size_C);
matmult3<<<dimGrid,dimBlock>>>(m, n, k, d_A, d_B, d_C);
cudaDeviceSynchronize();
cudaMemcpy(h_C, d_C, size_C, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
__global__ void matmult3(int m, int n, int k,double *A,double *B,double *C) {
int i,j,l;
j = blockIdx.x * blockDim.x + threadIdx.x;
i = (blockIdx.y * blockDim.y + threadIdx.y) * 2;
double C_reg[2] = {0.0};
if (i < m-1 && j < n) {
for (l = 0; l<k; l++) {
C_reg[0] = C_reg[0] + a(i,l) * b(l,j);
C_reg[1] = C_reg[1] + a(i+1,l) * b(l,j);
}
//Copy back to global memory
c(i,j) = C_reg[0];
c(i+1,j) = C_reg[1];
} else if (i == m-1 && j < n) {
for (l = 0; l<k; l++) {
C_reg[0] = C_reg[0] + a(i,l) * b(l,j);
}
//Copy back to global memory
c(i,j) = C_reg[0];
}
/*
j = (blockIdx.x * blockDim.x + threadIdx.x) * 2;
i = blockIdx.y * blockDim.y + threadIdx.y;
if (i < m && j < n-1) {
for (l = 0; l<k; l++) {
c(i,j) = c(i,j) + a(i,l) * b(l,j);
c(i,j+1) = c(i,j+1) + a(i,l) * b(l,j+1);
}
} else if (i < m && j == n-1) {
for (l = 0; l<k; l++) {
c(i,j) = c(i,j) + a(i,l) * b(l,j);
}
}
*/
}
void matmult_gpu4(int m, int n, int k,double *h_A,double *h_B,double *h_C) {
double *d_A, *d_B, *d_C;
int size_A = m * k * sizeof(double);
int size_B = k * n * sizeof(double);
int size_C = m * n * sizeof(double);
cudaMalloc((void **)&d_A, size_A);
cudaMalloc((void **)&d_B, size_B);
cudaMalloc((void **)&d_C, size_C);
cudaMemcpy(d_A, h_A, size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size_B, cudaMemcpyHostToDevice);
dim3 dimBlock(16, 16, 1); // Num threads
dim3 dimGrid((ceil((double)n/dimBlock.x)), ceil(((double)m/dimBlock.y) / num_el), 1); // Num blocks
cudaMemset(d_C, 0, size_C);
matmult4<<<dimGrid,dimBlock>>>(m, n, k, d_A, d_B, d_C);
cudaDeviceSynchronize();
cudaMemcpy(h_C, d_C, size_C, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
__global__ void matmult4(int m, int n, int k,double *A,double *B,double *C) {
int i,j,l,s;
j = blockIdx.x * blockDim.x + threadIdx.x;
i = (blockIdx.y * blockDim.y + threadIdx.y) * num_el;
double C_reg[num_el] = {0.0};
if (i < m-num_el && j < n) {
for (l = 0; l<k; l++) {
C_reg[0] = C_reg[0] + a(i,l) * b(l,j);
C_reg[1] = C_reg[1] + a(i+1,l) * b(l,j);
C_reg[2] = C_reg[2] + a(i+2,l) * b(l,j);
C_reg[3] = C_reg[3] + a(i+3,l) * b(l,j);
}
c(i,j) = C_reg[0];
c(i+1,j) = C_reg[1];
c(i+2,j) = C_reg[2];
c(i+3,j) = C_reg[3];
} else if (i >= m-num_el && j < n) {
for (s = i; s<m; s++) {
for (l = 0; l<k; l++) {
C_reg[s-i] = C_reg[s-i] + a(s,l) * b(l,j);
}
c(s,j) = C_reg[s-i];
}
}
}
void matmult_gpu5(int m, int n, int k,double *h_A,double *h_B,double *h_C) {
double *d_A, *d_B, *d_C;
int size_A = m * k * sizeof(double);
int size_B = k * n * sizeof(double);
int size_C = m * n * sizeof(double);
cudaMalloc((void **)&d_A, size_A);
cudaMalloc((void **)&d_B, size_B);
cudaMalloc((void **)&d_C, size_C);
cudaMemcpy(d_A, h_A, size_A, cudaMemcpyHostToDevice);
cudaMemcpy(d_B, h_B, size_B, cudaMemcpyHostToDevice);
dim3 dimBlock(16, 16, 1); // Num threads
dim3 dimGrid((ceil((double)n/dimBlock.x)), ceil(((double)m/dimBlock.y)), 1); // Num blocks
cudaMemset(d_C, 0, size_C);
matmult5<<<dimGrid,dimBlock>>>(m, n, k, d_A, d_B, d_C);
cudaDeviceSynchronize();
cudaMemcpy(h_C, d_C, size_C, cudaMemcpyDeviceToHost);
cudaFree(d_A);
cudaFree(d_B);
cudaFree(d_C);
}
__global__ void matmult5(int m, int n, int k,double *A,double *B,double *C) {
int i,j,l;
double Cvalue = 0;
double * Asub, *Bsub, *Csub;
i = threadIdx.y;
j = threadIdx.x;
int k_blocked = (k/BLOCK_SIZE);
//printf("k_blocked %d\n", k_blocked);
//Get Csub matrix
//k is here the A.stride
Csub = &C[n*BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x];
for (l = 0; l<k_blocked; l++) {
//Shared memory to store sub-matrices of A and B
__shared__ double as[BLOCK_SIZE][BLOCK_SIZE];
__shared__ double bs[BLOCK_SIZE][BLOCK_SIZE];
Asub = &A[k*BLOCK_SIZE * blockIdx.y +
BLOCK_SIZE * l];
Bsub = &B[n*BLOCK_SIZE * l +
BLOCK_SIZE * blockIdx.x];
as[threadIdx.y][threadIdx.x] = Asub[k*i + j];
bs[threadIdx.y][threadIdx.x] = Bsub[n*i + j];
__syncthreads();
//Multiply sub matrices
for(int e = 0; e < BLOCK_SIZE; ++e){
Cvalue += as[i][e] * bs[e][j];
}
__syncthreads();
}
  //Write the accumulated value back to global memory
//A.elements[row * A.stride + col] = value
Csub[i*n+j] = Cvalue;
}
|
6e541b3ac9cfed2fd96c308495b7fff7fe7e4c2f.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "../../settings.h"
__global__ void set_chunk_initial_state(
const int x,
const int y,
const double default_energy,
const double default_density,
double* energy0,
double* density)
{
const int gid = threadIdx.x+blockDim.x*blockIdx.x;
if(gid >= x*y) return;
energy0[gid]=default_energy;
density[gid]=default_density;
}
__global__ void set_chunk_state(
const int x,
const int y,
const double* vertex_x,
const double* vertex_y,
const double* cell_x,
const double* cell_y,
double* density,
double* energy0,
double* u,
State state)
{
const int gid = threadIdx.x+blockDim.x*blockIdx.x;
const int x_loc = gid % x;
const int y_loc = gid / x;
int apply_state = 0;
if(gid < x*y)
{
if(state.geometry == RECTANGULAR)
{
apply_state = (
vertex_x[x_loc+1] >= state.x_min &&
vertex_x[x_loc] < state.x_max &&
vertex_y[y_loc+1] >= state.y_min &&
vertex_y[y_loc] < state.y_max);
}
else if(state.geometry == CIRCULAR)
{
double radius = sqrt(
(cell_x[x_loc]-state.x_min)*
(cell_x[x_loc]-state.x_min)+
(cell_y[y_loc]-state.y_min)*
(cell_y[y_loc]-state.y_min));
apply_state = (radius <= state.radius);
}
else if(state.geometry == POINT)
{
apply_state = (
vertex_x[x_loc] == state.x_min &&
vertex_y[y_loc] == state.y_min);
}
// Check if state applies at this vertex, and apply
if(apply_state)
{
energy0[gid] = state.energy;
density[gid] = state.density;
}
}
if(x_loc > 0 && x_loc < x-1 &&
y_loc > 0 && y_loc < y-1)
{
u[gid]=energy0[gid]*density[gid];
}
}
| 6e541b3ac9cfed2fd96c308495b7fff7fe7e4c2f.cu | #include "../../settings.h"
__global__ void set_chunk_initial_state(
const int x,
const int y,
const double default_energy,
const double default_density,
double* energy0,
double* density)
{
const int gid = threadIdx.x+blockDim.x*blockIdx.x;
if(gid >= x*y) return;
energy0[gid]=default_energy;
density[gid]=default_density;
}
__global__ void set_chunk_state(
const int x,
const int y,
const double* vertex_x,
const double* vertex_y,
const double* cell_x,
const double* cell_y,
double* density,
double* energy0,
double* u,
State state)
{
const int gid = threadIdx.x+blockDim.x*blockIdx.x;
const int x_loc = gid % x;
const int y_loc = gid / x;
int apply_state = 0;
if(gid < x*y)
{
if(state.geometry == RECTANGULAR)
{
apply_state = (
vertex_x[x_loc+1] >= state.x_min &&
vertex_x[x_loc] < state.x_max &&
vertex_y[y_loc+1] >= state.y_min &&
vertex_y[y_loc] < state.y_max);
}
else if(state.geometry == CIRCULAR)
{
double radius = sqrt(
(cell_x[x_loc]-state.x_min)*
(cell_x[x_loc]-state.x_min)+
(cell_y[y_loc]-state.y_min)*
(cell_y[y_loc]-state.y_min));
apply_state = (radius <= state.radius);
}
else if(state.geometry == POINT)
{
apply_state = (
vertex_x[x_loc] == state.x_min &&
vertex_y[y_loc] == state.y_min);
}
// Check if state applies at this vertex, and apply
if(apply_state)
{
energy0[gid] = state.energy;
density[gid] = state.density;
}
}
if(x_loc > 0 && x_loc < x-1 &&
y_loc > 0 && y_loc < y-1)
{
u[gid]=energy0[gid]*density[gid];
}
}
|
88cfcd24658e3bf9c0c6a0ca898a8b256aa6df9b.hip | // !!! This is a file automatically generated by hipify!!!
#define GLM_FORCE_CUDA
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <cmath>
#include <algorithm>
#include <thrust/reduce.h>
#include <thrust/gather.h>
#include <thrust/functional.h>
#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include "cuda_icp_custom/utilityCore.hpp"
#include "cuda_icp_custom/svd3.h"
#include "cuda_icp_custom/kernel.h"
#include "device_launch_parameters.h"
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
hipError_t err = hipGetLastError();
if (hipSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// /*****************
// * Configuration *
// *****************/
// /*! Block size used for CUDA kernel launch. */
// #define blockSize 256
// #define sharedMemorySize 65536
// /*! Size of the starting area in simulation space. */
// // #define scene_scale 50.0f
// #define scene_scale 1.0f
dim3 threadsPerBlock(blockSize);
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
// int sizeTarget;
// int sizeScene;
// int numObjects;
// dim3 threadsPerBlock(blockSize);
// glm::vec4 *dev_pos;
// glm::vec3 *dev_color;
// int *dev_dist;
// int *dev_pair;
// KDTree::Node *dev_kd;
// glm::vec4 *host_pos;
// int *host_dist;
// int *host_pair;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
__host__ __device__ bool sortFuncX(const glm::vec4 &p1, const glm::vec4 &p2)
{
return p1.x < p2.x;
}
__host__ __device__ bool sortFuncY(const glm::vec4 &p1, const glm::vec4 &p2)
{
return p1.y < p2.y;
}
__host__ __device__ bool sortFuncZ(const glm::vec4 &p1, const glm::vec4 &p2)
{
return p1.z < p2.z;
}
__global__ void transformPoint(int N, glm::vec4 *points, glm::mat4 transform) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
points[index] = glm::vec4(glm::vec3(transform * glm::vec4(glm::vec3(points[index]), 1)), 1);
}
__global__ void kernResetVec3Buffer(int N, glm::vec3 *intBuffer, glm::vec3 value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
ICP::ICP() {}
/**
* Initialize memory, update some globals
*/
void ICP::initSimulation(std::vector<glm::vec4> scene, std::vector<glm::vec4> target, KDTree::Node *kd) {
// hipSetDevice(0);
sizeScene = scene.size();
sizeTarget = target.size();
numObjects = sizeScene + sizeTarget;
hipMalloc((void**)&dev_pos, numObjects * sizeof(glm::vec4));
checkCUDAErrorWithLine("hipMalloc dev_pos failed!");
hipMalloc((void**)&dev_color, numObjects * sizeof(glm::vec4));
checkCUDAErrorWithLine("hipMalloc dev_color failed!");
hipMalloc((void**)&dev_dist, sizeTarget * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_dist failed!");
hipMalloc((void**)&dev_pair, sizeTarget * sizeof(int));
checkCUDAErrorWithLine("hipMalloc dev_pair failed!");
hipMalloc((void**)&dev_kd, sizeScene * sizeof(KDTree::Node));
checkCUDAErrorWithLine("hipMalloc dev_kd failed!");
int checksum = 0;
for (int i = 0; i < sizeScene; i++)
checksum += scene[i].w;
// KDTree::Node *kd = new KDTree::Node[sizeScene];
// KDTree::Create(scene, kd);
hipMemcpyAsync(dev_kd, kd, sizeScene*sizeof(KDTree::Node), hipMemcpyHostToDevice, cudaStreamPerThread);
int testsum = 0;
for (int i = 0; i < sizeScene; i++) {
testsum += kd[i].value.w;
}
printf("kd size: %i\n", sizeScene*sizeof(KDTree::Node));
//verify all items are in the kd tree
assert(checksum == testsum);
// copy both scene and target to output points
// observed scene points
hipMemcpyAsync(dev_pos, &scene[0], scene.size()*sizeof(glm::vec4), hipMemcpyHostToDevice, cudaStreamPerThread);
// rendered points
hipMemcpyAsync(&dev_pos[scene.size()], &target[0], target.size()*sizeof(glm::vec4), hipMemcpyHostToDevice, cudaStreamPerThread);
#if INITIAL_ROT
//add rotation and translation to target for test;
cout << "Applying random initial transformation\n";
glm::vec3 t(20, -22, 20);
glm::vec3 r(-.5, .6, .8);
glm::vec3 s(1, 1, 1);
glm::mat4 initial_rot = utilityCore::buildTransformationMatrix(t, r, s);
transformPoint << <dim3((target.size() + blockSize - 1) / blockSize), blockSize >> >(target.size(), &dev_pos[scene.size()], initial_rot);
#endif
//set colors for points
kernResetVec3Buffer << <dim3((scene.size() + blockSize - 1) / blockSize), blockSize >> >(scene.size(), dev_color, glm::vec3(1, 1, 1));
kernResetVec3Buffer << <dim3((target.size() + blockSize - 1) / blockSize), blockSize >> >(target.size(), &dev_color[scene.size()], glm::vec3(0, 1, 0));
// hipStreamSynchronize(cudaStreamPerThread);
hipStreamSynchronize(cudaStreamPerThread);
host_pos = (glm::vec4*) malloc(numObjects * sizeof(glm::vec4));
host_pair = (int*)malloc(target.size() * sizeof(int));
hipMemcpyAsync(host_pos, dev_pos, numObjects * sizeof(glm::vec4), hipMemcpyDeviceToHost, cudaStreamPerThread);
// hipStreamSynchronize(cudaStreamPerThread);
hipStreamSynchronize(cudaStreamPerThread);
}
void ICP::endSimulation() {
hipFree(dev_pos);
hipFree(dev_color);
hipFree(dev_dist);
hipFree(dev_pair);
hipFree(dev_kd);
free(host_pos);
free(host_pair);
hipStreamDestroy(cudaStreamPerThread);
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec4 *pos, float *vbo, float s_scale, int start) {
int index = threadIdx.x + (blockIdx.x * blockDim.x) + start;
float c_scale = -1.0f / s_scale;
if (index - start < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyColorsToVBO(int N, glm::vec3 *color, float *vbo, float s_scale, int start) {
int index = threadIdx.x + (blockIdx.x * blockDim.x) + start;
if (index - start < N) {
vbo[4 * index + 0] = color[index].x + 0.3f;
vbo[4 * index + 1] = color[index].y + 0.3f;
vbo[4 * index + 2] = color[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void ICP::copyPointsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
//batch copies to prevent memory errors
int batchSize = 1 << 16;
  // use '<' so an exact multiple of batchSize does not produce a zero-sized kernel launch
  for (int i = 0; i < numObjects; i += batchSize) {
int n = imin(batchSize, numObjects - i);
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(n, dev_pos, vbodptr_positions, scene_scale, i);
kernCopyColorsToVBO << <fullBlocksPerGrid, blockSize >> >(n, dev_color, vbodptr_velocities, scene_scale, i);
}
checkCUDAErrorWithLine("copyBoidsToVBO color failed!");
hipStreamSynchronize(cudaStreamPerThread);
}
/******************
* stepSimulation *
******************/
__global__ void findCorrespondence(int N, int sizeScene, glm::vec4 *cor, const glm::vec4 *points)
{
int i = threadIdx.x + (blockIdx.x * blockDim.x);
if (i >= N) {
return;
}
glm::vec4 pt = points[i + sizeScene];
float best = glm::distance(glm::vec3(points[0]), glm::vec3(pt));
cor[i] = points[0];
for (int j = 1; j < sizeScene; j++) {
float d = glm::distance(glm::vec3(points[j]), glm::vec3(pt));
if (d < best) {
cor[i] = points[j];
best = d;
}
}
}
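// Editorial note (not part of the original source): this is the brute-force correspondence
// search -- each of the sizeTarget threads scans all sizeScene points, i.e. O(sizeTarget *
// sizeScene) distance evaluations per ICP iteration. It is only used when KD_TREE_SEARCH is
// not enabled; findCorrespondenceKD below replaces the inner scan with a k-d tree walk.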
__device__ float getHyperplaneDist(const glm::vec4 *pt1, const glm::vec4 *pt2, int axis, bool *branch)
{
if (axis == 0) {
*branch = sortFuncX(*pt1, *pt2);
return abs(pt1->x - pt2->x);
}
if (axis == 1) {
*branch = sortFuncY(*pt1, *pt2);
return abs(pt1->y - pt2->y);
}
if (axis == 2) {
*branch = sortFuncZ(*pt1, *pt2);
return abs(pt1->z - pt2->z);
}
  // axis should always be 0, 1 or 2; fall back so control never reaches the end of a
  // non-void function
  *branch = false;
  return 0.0f;
}
__global__ void findCorrespondenceKD(int N, int sizeScene, glm::vec4 *cor, const glm::vec4 *points, const KDTree::Node* tree)
{
int i = threadIdx.x + (blockIdx.x * blockDim.x);
if (i >= N) {
return;
}
glm::vec4 pt = points[i + sizeScene];
float bestDist = glm::distance(glm::vec3(pt), glm::vec3(tree[0].value));
int bestIdx = 0;
int head = 0;
bool done = false;
bool branch = false;
bool nodeFullyExplored = false;
while (!done) {
// depth first on current branch
while (head >= 0) {
// check the current node
const KDTree::Node test = tree[head];
float d = glm::distance(glm::vec3(pt), glm::vec3(test.value));
if (d < bestDist) {
bestDist = d;
bestIdx = head;
nodeFullyExplored = false;
}
// find branch path
getHyperplaneDist(&pt, &test.value, test.axis, &branch);
head = branch ? test.left : test.right;
}
if (nodeFullyExplored) {
done = true;
}
else {
// check if parent of best node could have better values on other branch
const KDTree::Node parent = tree[tree[bestIdx].parent];
if (getHyperplaneDist(&pt, &parent.value, parent.axis, &branch) < bestDist) {
head = !branch ? parent.left : parent.right;
nodeFullyExplored = true;
}
else
done = true;
}
}
cor[i] = tree[bestIdx].value;
}
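// Editorial note (not part of the original source): the traversal above first descends the
// k-d tree, choosing the child on the query point's side of each splitting plane, and then
// backtracks only through the parent of the current best node: if that parent's splitting
// plane is closer than the best distance found so far, the opposite subtree is descended as
// well, and this repeats while it keeps improving. Because no full backtracking stack is
// maintained, the result appears to be an approximate (not guaranteed exact) nearest
// neighbour, which is usually acceptable for ICP correspondences.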
__global__ void outerProduct(int N, const glm::vec4 *vec1, const glm::vec4 *vec2, glm::mat3 *out)
{
int i = threadIdx.x + (blockIdx.x * blockDim.x);
if (i >= N) {
return;
}
out[i] = glm::mat3( glm::vec3(vec1[i]) * vec2[i].x,
glm::vec3(vec1[i]) * vec2[i].y,
glm::vec3(vec1[i]) * vec2[i].z);
}
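// Editorial note (not part of the original source): glm::mat3 is column-major, so the
// constructor above builds out[i] with columns vec1[i]*vec2[i].x, vec1[i]*vec2[i].y and
// vec1[i]*vec2[i].z, i.e. the 3x3 outer product vec1[i] * vec2[i]^T. In stepGPU this kernel
// is called with the mean-centred target points and their correspondences, and the per-pair
// matrices are then summed with thrust::reduce to form the cross-covariance matrix W that
// feeds the SVD.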
__global__ void euclideanError(int N, const glm::vec4 *vec1, const glm::vec4 *vec2, float *out)
{
int i = threadIdx.x + (blockIdx.x * blockDim.x);
if (i >= N) {
return;
}
out[i] = sqrt((vec1[i].x - vec2[i].x) * (vec1[i].x - vec2[i].x) +
(vec1[i].y - vec2[i].y) * (vec1[i].y - vec2[i].y) +
(vec1[i].z - vec2[i].z) * (vec1[i].z - vec2[i].z));
}
/**
* Step the ICP algorithm.
*/
void ICP::stepCPU() {
// find the closest point in the scene for each point in the target
for (int i = 0; i < sizeTarget; i++) {
float best = glm::distance(glm::vec3(host_pos[0]), glm::vec3(host_pos[i + sizeScene]));
host_pair[i] = 0;
for (int j = 1; j < sizeScene; j++) {
float d = glm::distance(glm::vec3(host_pos[j]), glm::vec3(host_pos[i + sizeScene]));
if (d < best) {
host_pair[i] = j;
best = d;
}
}
}
// Calculate mean centered correspondenses
glm::vec3 mu_tar(0, 0, 0), mu_cor(0, 0, 0);
std::vector<glm::vec3> tar_c;
std::vector<glm::vec3> cor_c;
for (int i = 0; i < sizeTarget; i++) {
mu_tar += glm::vec3(host_pos[i + sizeScene]);
mu_cor += glm::vec3(host_pos[host_pair[i]]);
}
mu_tar /= sizeTarget;
mu_cor /= sizeTarget;
for (int i = 0; i < sizeTarget; i++) {
tar_c.push_back(glm::vec3(host_pos[i + sizeScene]) - mu_tar);
cor_c.push_back(glm::vec3(host_pos[host_pair[i]]) - mu_cor);
}
// Calculate W
float W[3][3] = {0};
for (int i = 0; i < sizeTarget; i++) {
W[0][0] += tar_c[i].x * cor_c[i].x;
W[0][1] += tar_c[i].y * cor_c[i].x;
W[0][2] += tar_c[i].z * cor_c[i].x;
W[1][0] += tar_c[i].x * cor_c[i].y;
W[1][1] += tar_c[i].y * cor_c[i].y;
W[1][2] += tar_c[i].z * cor_c[i].y;
W[2][0] += tar_c[i].x * cor_c[i].z;
W[2][1] += tar_c[i].y * cor_c[i].z;
W[2][2] += tar_c[i].z * cor_c[i].z;
}
// calculate SVD of W
float U[3][3] = { 0 };
float S[3][3] = { 0 };
float V[3][3] = { 0 };
svd(W[0][0], W[0][1], W[0][2], W[1][0], W[1][1], W[1][2], W[2][0], W[2][1], W[2][2],
U[0][0], U[0][1], U[0][2], U[1][0], U[1][1], U[1][2], U[2][0], U[2][1], U[2][2],
S[0][0], S[0][1], S[0][2], S[1][0], S[1][1], S[1][2], S[2][0], S[2][1], S[2][2],
V[0][0], V[0][1], V[0][2], V[1][0], V[1][1], V[1][2], V[2][0], V[2][1], V[2][2]
);
glm::mat3 g_U(glm::vec3(U[0][0], U[1][0], U[2][0]), glm::vec3(U[0][1], U[1][1], U[2][1]), glm::vec3(U[0][2], U[1][2], U[2][2]));
glm::mat3 g_Vt(glm::vec3(V[0][0], V[0][1], V[0][2]), glm::vec3(V[1][0], V[1][1], V[1][2]), glm::vec3(V[2][0], V[2][1], V[2][2]));
// Get transformation from SVD
glm::mat3 R =g_U * g_Vt;
glm::vec3 t = mu_cor - R*mu_tar;
// update target points
for (int i = 0; i < sizeTarget; i++) {
host_pos[i + sizeScene] = glm::vec4(R*glm::vec3(host_pos[i + sizeScene]) + t, host_pos[i + sizeScene].w);
}
hipMemcpyAsync(&dev_pos[sizeScene], &host_pos[sizeScene], sizeTarget * sizeof(glm::vec4), hipMemcpyHostToDevice, cudaStreamPerThread);
}
/**
* Step the ICP algorithm.
*/
bool ICP::iterateGPU() {
int max_iter = 25;
int iter = 0;
auto start = std::chrono::high_resolution_clock::now();
glm::vec3 t(0, 0, 0);
glm::vec3 r(0, 0, 0);
glm::vec3 s(1, 1, 1);
glm::mat4 total_transform = utilityCore::buildTransformationMatrix(t, r, s);
// glm::mat4 total_transform = glm::mat4( 1.0 );
while (iter < max_iter)
{
printf("Iteration:%d\n", iter);
stepGPU(total_transform);
iter++;
}
printf("Final Transform : \n");
// total_transform = glm::inverse(total_transform);
utilityCore::printMat4(total_transform);
auto finish = std::chrono::high_resolution_clock::now();
std::cout << "iterateGPU() took "
            << std::chrono::duration_cast<std::chrono::milliseconds>(finish - start).count()
<< " milliseconds\n";
return true;
}
void ICP::stepGPU(glm::mat4& total_transform) {
dim3 fullBlocksPerGrid((sizeTarget + blockSize - 1) / blockSize);
// find the closest point in the scene for each point in the target
glm::vec4 *dev_cor, *tar_c, *cor_c;
glm::mat3 *dev_W;
float *euclidean_error;
hipMalloc((void**)&dev_cor, sizeTarget*sizeof(glm::vec4));
hipMalloc((void**)&tar_c, sizeTarget*sizeof(glm::vec4));
hipMalloc((void**)&cor_c, sizeTarget*sizeof(glm::vec4));
hipMalloc((void**)&dev_W, sizeTarget * sizeof(glm::mat3));
hipMalloc((void**)&euclidean_error, sizeTarget * sizeof(float));
hipMemset(dev_W, 0, sizeTarget * sizeof(glm::mat3));
#if KD_TREE_SEARCH
findCorrespondenceKD << <fullBlocksPerGrid, blockSize >> >(sizeTarget, sizeScene, dev_cor, dev_pos, dev_kd);
#else
findCorrespondence << <fullBlocksPerGrid, blockSize >> >(sizeTarget, sizeScene, dev_cor, dev_pos);
#endif
hipStreamSynchronize(cudaStreamPerThread);
// dev_cor contains correspondences of rendered points in observed cloud
// Calculate mean centered correspondenses
glm::vec3 mu_tar(0, 0, 0), mu_cor(0, 0, 0);
thrust::device_ptr<glm::vec4> ptr_target(&dev_pos[sizeScene]);
thrust::device_ptr<glm::vec4> ptr_scene(dev_pos);
thrust::device_ptr<glm::vec4> ptr_cor(dev_cor);
mu_tar = glm::vec3(thrust::reduce(thrust::hip::par.on(cudaStreamPerThread), ptr_target, ptr_target + sizeTarget, glm::vec4(0, 0, 0, 0)));
mu_cor = glm::vec3(thrust::reduce(thrust::hip::par.on(cudaStreamPerThread), ptr_cor, ptr_cor + sizeTarget, glm::vec4(0, 0, 0, 0)));
// get mean
mu_tar /= sizeTarget;
mu_cor /= sizeTarget;
// utilityCore::printVec3(mu_tar);
printf("Target mean (x,y,z) : %f,%f,%f\n", mu_tar.x, mu_tar.y, mu_tar.z);
printf("Corr mean (x,y,z) : %f,%f,%f\n", mu_cor.x, mu_cor.y, mu_cor.z);
hipMemcpyAsync(tar_c, &dev_pos[sizeScene], sizeTarget*sizeof(glm::vec4), hipMemcpyDeviceToDevice, cudaStreamPerThread);
hipMemcpyAsync(cor_c, dev_cor, sizeTarget*sizeof(glm::vec4), hipMemcpyDeviceToDevice, cudaStreamPerThread);
hipStreamSynchronize(cudaStreamPerThread);
glm::vec3 r(0, 0, 0);
glm::vec3 s(1, 1, 1);
glm::mat4 center_tar = utilityCore::buildTransformationMatrix(-mu_tar, r, s);
glm::mat4 center_cor = utilityCore::buildTransformationMatrix(-mu_cor, r, s);
// utilityCore::printMat4(center_tar);
transformPoint << <fullBlocksPerGrid, blockSize >> >(sizeTarget, tar_c, center_tar);
checkCUDAErrorWithLine("mean centered transformation 1 failed!");
hipStreamSynchronize(cudaStreamPerThread);
transformPoint << <fullBlocksPerGrid, blockSize >> >(sizeTarget, cor_c, center_cor);
checkCUDAErrorWithLine("mean centered transformation 2 failed!");
hipStreamSynchronize(cudaStreamPerThread);
euclideanError << <fullBlocksPerGrid, blockSize >> >(sizeTarget, tar_c, cor_c, euclidean_error);
thrust::device_ptr<float> ptr_error(euclidean_error);
float total_error = thrust::reduce(thrust::hip::par.on(cudaStreamPerThread), ptr_error, ptr_error + sizeTarget, (float) 0.0, thrust::plus<float>());
// for (int i = 0; i < sizeTarget; i++)
// {
// printf("Dist:%f\n", euclidean_error[i]);
// }
printf("Euclidean Error:%f\n", total_error);
checkCUDAErrorWithLine("euclideanError failed!");
hipStreamSynchronize(cudaStreamPerThread);
// Calculate W
outerProduct << <fullBlocksPerGrid, blockSize >> >(sizeTarget, tar_c, cor_c, dev_W);
thrust::device_ptr<glm::mat3> ptr_W(dev_W);
glm::mat3 W = thrust::reduce(thrust::hip::par.on(cudaStreamPerThread), ptr_W, ptr_W + sizeTarget, glm::mat3(0));
checkCUDAErrorWithLine("outer product failed!");
hipStreamSynchronize(cudaStreamPerThread);
// calculate SVD of W
glm::mat3 U, S, V;
svd(W[0][0], W[0][1], W[0][2], W[1][0], W[1][1], W[1][2], W[2][0], W[2][1], W[2][2],
U[0][0], U[0][1], U[0][2], U[1][0], U[1][1], U[1][2], U[2][0], U[2][1], U[2][2],
S[0][0], S[0][1], S[0][2], S[1][0], S[1][1], S[1][2], S[2][0], S[2][1], S[2][2],
V[0][0], V[0][1], V[0][2], V[1][0], V[1][1], V[1][2], V[2][0], V[2][1], V[2][2]
);
glm::mat3 g_U(glm::vec3(U[0][0], U[1][0], U[2][0]), glm::vec3(U[0][1], U[1][1], U[2][1]), glm::vec3(U[0][2], U[1][2], U[2][2]));
glm::mat3 g_Vt(glm::vec3(V[0][0], V[0][1], V[0][2]), glm::vec3(V[1][0], V[1][1], V[1][2]), glm::vec3(V[2][0], V[2][1], V[2][2]));
// Get transformation from SVD
glm::mat3 R = g_U * g_Vt;
glm::vec3 t = glm::vec3(mu_cor) - R*glm::vec3(mu_tar);
// update target points
glm::mat4 transform = glm::translate(glm::mat4(), t) * glm::mat4(R);
transformPoint << <fullBlocksPerGrid, blockSize >> >(sizeTarget, &dev_pos[sizeScene], transform);
// total_transform *= transform;
total_transform = transform * total_transform;
checkCUDAErrorWithLine("transform failed!");
hipStreamSynchronize(cudaStreamPerThread);
// utilityCore::printMat4(transform);
// std::cout << glm::to_string(transform) << std::endl;
// int i,j;
// for (j=0; j<4; j++){
// for (i=0; i<4; i++){
// printf("%f ",transform[i][j]);
// }
// printf("\n");
// }
// printf("\n");
// printf("\n");
hipFree(dev_cor);
hipFree(tar_c);
hipFree(cor_c);
hipFree(dev_W);
}
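// Editorial note (not part of the original source): stepGPU is one ICP iteration in the
// usual SVD/Procrustes form -- find correspondences, mean-centre both point sets, accumulate
// the cross-covariance W from per-pair outer products, take its SVD, compose R from U and
// V^T, set t = mu_cor - R*mu_tar, and apply the resulting rigid transform to the target
// points while accumulating it into total_transform. One common refinement that does not
// appear here (noted only as an observation): checking det(R) and flipping the sign of the
// last singular direction when det(R) < 0, so that a reflection is never returned.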
void ICP::checkConvergence(int thresh) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
}
void ICP::unitTest() {
std::vector<glm::vec4> test;
for (int i = 0; i < 16; i++)
test.push_back(glm::vec4((6 - 2 * i) % 3, -i % 4, i, i));
KDTree::Node *nd = new KDTree::Node[16];
KDTree::Create(test, nd);
printf("nodes: \n");
for (int i = 0; i < 16; i++)
printf(" %i: parent(%i), axis(%i), children(%i %i), val (%f %f %f)\n", i,
nd[i].parent, nd[i].axis, nd[i].left, nd[i].right, nd[i].value.x, nd[i].value.y, nd[i].value.z);
glm::vec4 a(.1, .2, .3, 1234);
glm::vec3 b = glm::vec3(a);
//printf("\nb: %f %f %f (%f)\n", b.x, b.y, b.z, a.w);
} | 88cfcd24658e3bf9c0c6a0ca898a8b256aa6df9b.cu | #define GLM_FORCE_CUDA
#include <stdio.h>
#include <cuda.h>
#include <cmath>
#include <algorithm>
#include <thrust/reduce.h>
#include <thrust/gather.h>
#include <thrust/functional.h>
#include <glm/glm.hpp>
#include <glm/gtc/type_ptr.hpp>
#include <glm/gtc/matrix_transform.hpp>
#include "cuda_icp_custom/utilityCore.hpp"
#include "cuda_icp_custom/svd3.h"
#include "cuda_icp_custom/kernel.h"
#include "device_launch_parameters.h"
/**
* Check for CUDA errors; print and exit if there was a problem.
*/
void checkCUDAError(const char *msg, int line = -1) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess != err) {
if (line >= 0) {
fprintf(stderr, "Line %d: ", line);
}
fprintf(stderr, "Cuda error: %s: %s.\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
}
// /*****************
// * Configuration *
// *****************/
// /*! Block size used for CUDA kernel launch. */
// #define blockSize 256
// #define sharedMemorySize 65536
// /*! Size of the starting area in simulation space. */
// // #define scene_scale 50.0f
// #define scene_scale 1.0f
dim3 threadsPerBlock(blockSize);
/***********************************************
* Kernel state (pointers are device pointers) *
***********************************************/
// int sizeTarget;
// int sizeScene;
// int numObjects;
// dim3 threadsPerBlock(blockSize);
// glm::vec4 *dev_pos;
// glm::vec3 *dev_color;
// int *dev_dist;
// int *dev_pair;
// KDTree::Node *dev_kd;
// glm::vec4 *host_pos;
// int *host_dist;
// int *host_pair;
/******************
* initSimulation *
******************/
__host__ __device__ unsigned int hash(unsigned int a) {
a = (a + 0x7ed55d16) + (a << 12);
a = (a ^ 0xc761c23c) ^ (a >> 19);
a = (a + 0x165667b1) + (a << 5);
a = (a + 0xd3a2646c) ^ (a << 9);
a = (a + 0xfd7046c5) + (a << 3);
a = (a ^ 0xb55a4f09) ^ (a >> 16);
return a;
}
__host__ __device__ bool sortFuncX(const glm::vec4 &p1, const glm::vec4 &p2)
{
return p1.x < p2.x;
}
__host__ __device__ bool sortFuncY(const glm::vec4 &p1, const glm::vec4 &p2)
{
return p1.y < p2.y;
}
__host__ __device__ bool sortFuncZ(const glm::vec4 &p1, const glm::vec4 &p2)
{
return p1.z < p2.z;
}
__global__ void transformPoint(int N, glm::vec4 *points, glm::mat4 transform) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= N) {
return;
}
points[index] = glm::vec4(glm::vec3(transform * glm::vec4(glm::vec3(points[index]), 1)), 1);
}
__global__ void kernResetVec3Buffer(int N, glm::vec3 *intBuffer, glm::vec3 value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
__global__ void kernResetIntBuffer(int N, int *intBuffer, int value) {
int index = (blockIdx.x * blockDim.x) + threadIdx.x;
if (index < N) {
intBuffer[index] = value;
}
}
ICP::ICP() {}
/**
* Initialize memory, update some globals
*/
void ICP::initSimulation(std::vector<glm::vec4> scene, std::vector<glm::vec4> target, KDTree::Node *kd) {
// cudaSetDevice(0);
sizeScene = scene.size();
sizeTarget = target.size();
numObjects = sizeScene + sizeTarget;
cudaMalloc((void**)&dev_pos, numObjects * sizeof(glm::vec4));
checkCUDAErrorWithLine("cudaMalloc dev_pos failed!");
cudaMalloc((void**)&dev_color, numObjects * sizeof(glm::vec4));
checkCUDAErrorWithLine("cudaMalloc dev_color failed!");
cudaMalloc((void**)&dev_dist, sizeTarget * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_dist failed!");
cudaMalloc((void**)&dev_pair, sizeTarget * sizeof(int));
checkCUDAErrorWithLine("cudaMalloc dev_pair failed!");
cudaMalloc((void**)&dev_kd, sizeScene * sizeof(KDTree::Node));
checkCUDAErrorWithLine("cudaMalloc dev_kd failed!");
int checksum = 0;
for (int i = 0; i < sizeScene; i++)
checksum += scene[i].w;
// KDTree::Node *kd = new KDTree::Node[sizeScene];
// KDTree::Create(scene, kd);
cudaMemcpyAsync(dev_kd, kd, sizeScene*sizeof(KDTree::Node), cudaMemcpyHostToDevice, cudaStreamPerThread);
int testsum = 0;
for (int i = 0; i < sizeScene; i++) {
testsum += kd[i].value.w;
}
printf("kd size: %i\n", sizeScene*sizeof(KDTree::Node));
//verify all items are in the kd tree
assert(checksum == testsum);
// copy both scene and target to output points
// observed scene points
cudaMemcpyAsync(dev_pos, &scene[0], scene.size()*sizeof(glm::vec4), cudaMemcpyHostToDevice, cudaStreamPerThread);
// rendered points
cudaMemcpyAsync(&dev_pos[scene.size()], &target[0], target.size()*sizeof(glm::vec4), cudaMemcpyHostToDevice, cudaStreamPerThread);
#if INITIAL_ROT
//add rotation and translation to target for test;
cout << "Applying random initial transformation\n";
glm::vec3 t(20, -22, 20);
glm::vec3 r(-.5, .6, .8);
glm::vec3 s(1, 1, 1);
glm::mat4 initial_rot = utilityCore::buildTransformationMatrix(t, r, s);
transformPoint << <dim3((target.size() + blockSize - 1) / blockSize), blockSize >> >(target.size(), &dev_pos[scene.size()], initial_rot);
#endif
//set colors for points
kernResetVec3Buffer << <dim3((scene.size() + blockSize - 1) / blockSize), blockSize >> >(scene.size(), dev_color, glm::vec3(1, 1, 1));
kernResetVec3Buffer << <dim3((target.size() + blockSize - 1) / blockSize), blockSize >> >(target.size(), &dev_color[scene.size()], glm::vec3(0, 1, 0));
// cudaStreamSynchronize(cudaStreamPerThread);
cudaStreamSynchronize(cudaStreamPerThread);
host_pos = (glm::vec4*) malloc(numObjects * sizeof(glm::vec4));
host_pair = (int*)malloc(target.size() * sizeof(int));
cudaMemcpyAsync(host_pos, dev_pos, numObjects * sizeof(glm::vec4), cudaMemcpyDeviceToHost, cudaStreamPerThread);
// cudaStreamSynchronize(cudaStreamPerThread);
cudaStreamSynchronize(cudaStreamPerThread);
}
void ICP::endSimulation() {
cudaFree(dev_pos);
cudaFree(dev_color);
cudaFree(dev_dist);
cudaFree(dev_pair);
cudaFree(dev_kd);
free(host_pos);
free(host_pair);
cudaStreamDestroy(cudaStreamPerThread);
}
/******************
* copyBoidsToVBO *
******************/
/**
* Copy the boid positions into the VBO so that they can be drawn by OpenGL.
*/
__global__ void kernCopyPositionsToVBO(int N, glm::vec4 *pos, float *vbo, float s_scale, int start) {
int index = threadIdx.x + (blockIdx.x * blockDim.x) + start;
float c_scale = -1.0f / s_scale;
if (index - start < N) {
vbo[4 * index + 0] = pos[index].x * c_scale;
vbo[4 * index + 1] = pos[index].y * c_scale;
vbo[4 * index + 2] = pos[index].z * c_scale;
vbo[4 * index + 3] = 1.0f;
}
}
__global__ void kernCopyColorsToVBO(int N, glm::vec3 *color, float *vbo, float s_scale, int start) {
int index = threadIdx.x + (blockIdx.x * blockDim.x) + start;
if (index - start < N) {
vbo[4 * index + 0] = color[index].x + 0.3f;
vbo[4 * index + 1] = color[index].y + 0.3f;
vbo[4 * index + 2] = color[index].z + 0.3f;
vbo[4 * index + 3] = 1.0f;
}
}
/**
* Wrapper for call to the kernCopyboidsToVBO CUDA kernel.
*/
void ICP::copyPointsToVBO(float *vbodptr_positions, float *vbodptr_velocities) {
//batch copies to prevent memory errors
int batchSize = 1 << 16;
    // use '<' so an exact multiple of batchSize does not produce a zero-sized kernel launch
    for (int i = 0; i < numObjects; i += batchSize) {
int n = imin(batchSize, numObjects - i);
dim3 fullBlocksPerGrid((n + blockSize - 1) / blockSize);
kernCopyPositionsToVBO << <fullBlocksPerGrid, blockSize >> >(n, dev_pos, vbodptr_positions, scene_scale, i);
kernCopyColorsToVBO << <fullBlocksPerGrid, blockSize >> >(n, dev_color, vbodptr_velocities, scene_scale, i);
}
checkCUDAErrorWithLine("copyBoidsToVBO color failed!");
cudaStreamSynchronize(cudaStreamPerThread);
}
/******************
* stepSimulation *
******************/
__global__ void findCorrespondence(int N, int sizeScene, glm::vec4 *cor, const glm::vec4 *points)
{
int i = threadIdx.x + (blockIdx.x * blockDim.x);
if (i >= N) {
return;
}
glm::vec4 pt = points[i + sizeScene];
float best = glm::distance(glm::vec3(points[0]), glm::vec3(pt));
cor[i] = points[0];
for (int j = 1; j < sizeScene; j++) {
float d = glm::distance(glm::vec3(points[j]), glm::vec3(pt));
if (d < best) {
cor[i] = points[j];
best = d;
}
}
}
__device__ float getHyperplaneDist(const glm::vec4 *pt1, const glm::vec4 *pt2, int axis, bool *branch)
{
if (axis == 0) {
*branch = sortFuncX(*pt1, *pt2);
return abs(pt1->x - pt2->x);
}
if (axis == 1) {
*branch = sortFuncY(*pt1, *pt2);
return abs(pt1->y - pt2->y);
}
if (axis == 2) {
*branch = sortFuncZ(*pt1, *pt2);
return abs(pt1->z - pt2->z);
}
    // axis should always be 0, 1 or 2; fall back so control never reaches the end of a
    // non-void function
    *branch = false;
    return 0.0f;
}
__global__ void findCorrespondenceKD(int N, int sizeScene, glm::vec4 *cor, const glm::vec4 *points, const KDTree::Node* tree)
{
int i = threadIdx.x + (blockIdx.x * blockDim.x);
if (i >= N) {
return;
}
glm::vec4 pt = points[i + sizeScene];
float bestDist = glm::distance(glm::vec3(pt), glm::vec3(tree[0].value));
int bestIdx = 0;
int head = 0;
bool done = false;
bool branch = false;
bool nodeFullyExplored = false;
while (!done) {
// depth first on current branch
while (head >= 0) {
// check the current node
const KDTree::Node test = tree[head];
float d = glm::distance(glm::vec3(pt), glm::vec3(test.value));
if (d < bestDist) {
bestDist = d;
bestIdx = head;
nodeFullyExplored = false;
}
// find branch path
getHyperplaneDist(&pt, &test.value, test.axis, &branch);
head = branch ? test.left : test.right;
}
if (nodeFullyExplored) {
done = true;
}
else {
// check if parent of best node could have better values on other branch
const KDTree::Node parent = tree[tree[bestIdx].parent];
if (getHyperplaneDist(&pt, &parent.value, parent.axis, &branch) < bestDist) {
head = !branch ? parent.left : parent.right;
nodeFullyExplored = true;
}
else
done = true;
}
}
cor[i] = tree[bestIdx].value;
}
__global__ void outerProduct(int N, const glm::vec4 *vec1, const glm::vec4 *vec2, glm::mat3 *out)
{
int i = threadIdx.x + (blockIdx.x * blockDim.x);
if (i >= N) {
return;
}
out[i] = glm::mat3( glm::vec3(vec1[i]) * vec2[i].x,
glm::vec3(vec1[i]) * vec2[i].y,
glm::vec3(vec1[i]) * vec2[i].z);
}
__global__ void euclideanError(int N, const glm::vec4 *vec1, const glm::vec4 *vec2, float *out)
{
int i = threadIdx.x + (blockIdx.x * blockDim.x);
if (i >= N) {
return;
}
out[i] = sqrt((vec1[i].x - vec2[i].x) * (vec1[i].x - vec2[i].x) +
(vec1[i].y - vec2[i].y) * (vec1[i].y - vec2[i].y) +
(vec1[i].z - vec2[i].z) * (vec1[i].z - vec2[i].z));
}
/**
* Step the ICP algorithm.
*/
void ICP::stepCPU() {
// find the closest point in the scene for each point in the target
for (int i = 0; i < sizeTarget; i++) {
float best = glm::distance(glm::vec3(host_pos[0]), glm::vec3(host_pos[i + sizeScene]));
host_pair[i] = 0;
for (int j = 1; j < sizeScene; j++) {
float d = glm::distance(glm::vec3(host_pos[j]), glm::vec3(host_pos[i + sizeScene]));
if (d < best) {
host_pair[i] = j;
best = d;
}
}
}
// Calculate mean centered correspondenses
glm::vec3 mu_tar(0, 0, 0), mu_cor(0, 0, 0);
std::vector<glm::vec3> tar_c;
std::vector<glm::vec3> cor_c;
for (int i = 0; i < sizeTarget; i++) {
mu_tar += glm::vec3(host_pos[i + sizeScene]);
mu_cor += glm::vec3(host_pos[host_pair[i]]);
}
mu_tar /= sizeTarget;
mu_cor /= sizeTarget;
for (int i = 0; i < sizeTarget; i++) {
tar_c.push_back(glm::vec3(host_pos[i + sizeScene]) - mu_tar);
cor_c.push_back(glm::vec3(host_pos[host_pair[i]]) - mu_cor);
}
// Calculate W
float W[3][3] = {0};
for (int i = 0; i < sizeTarget; i++) {
W[0][0] += tar_c[i].x * cor_c[i].x;
W[0][1] += tar_c[i].y * cor_c[i].x;
W[0][2] += tar_c[i].z * cor_c[i].x;
W[1][0] += tar_c[i].x * cor_c[i].y;
W[1][1] += tar_c[i].y * cor_c[i].y;
W[1][2] += tar_c[i].z * cor_c[i].y;
W[2][0] += tar_c[i].x * cor_c[i].z;
W[2][1] += tar_c[i].y * cor_c[i].z;
W[2][2] += tar_c[i].z * cor_c[i].z;
}
// calculate SVD of W
float U[3][3] = { 0 };
float S[3][3] = { 0 };
float V[3][3] = { 0 };
svd(W[0][0], W[0][1], W[0][2], W[1][0], W[1][1], W[1][2], W[2][0], W[2][1], W[2][2],
U[0][0], U[0][1], U[0][2], U[1][0], U[1][1], U[1][2], U[2][0], U[2][1], U[2][2],
S[0][0], S[0][1], S[0][2], S[1][0], S[1][1], S[1][2], S[2][0], S[2][1], S[2][2],
V[0][0], V[0][1], V[0][2], V[1][0], V[1][1], V[1][2], V[2][0], V[2][1], V[2][2]
);
glm::mat3 g_U(glm::vec3(U[0][0], U[1][0], U[2][0]), glm::vec3(U[0][1], U[1][1], U[2][1]), glm::vec3(U[0][2], U[1][2], U[2][2]));
glm::mat3 g_Vt(glm::vec3(V[0][0], V[0][1], V[0][2]), glm::vec3(V[1][0], V[1][1], V[1][2]), glm::vec3(V[2][0], V[2][1], V[2][2]));
// Get transformation from SVD
glm::mat3 R =g_U * g_Vt;
glm::vec3 t = mu_cor - R*mu_tar;
// update target points
for (int i = 0; i < sizeTarget; i++) {
host_pos[i + sizeScene] = glm::vec4(R*glm::vec3(host_pos[i + sizeScene]) + t, host_pos[i + sizeScene].w);
}
cudaMemcpyAsync(&dev_pos[sizeScene], &host_pos[sizeScene], sizeTarget * sizeof(glm::vec4), cudaMemcpyHostToDevice, cudaStreamPerThread);
}
/**
* Step the ICP algorithm.
*/
bool ICP::iterateGPU() {
int max_iter = 25;
int iter = 0;
auto start = std::chrono::high_resolution_clock::now();
glm::vec3 t(0, 0, 0);
glm::vec3 r(0, 0, 0);
glm::vec3 s(1, 1, 1);
glm::mat4 total_transform = utilityCore::buildTransformationMatrix(t, r, s);
// glm::mat4 total_transform = glm::mat4( 1.0 );
while (iter < max_iter)
{
printf("Iteration:%d\n", iter);
stepGPU(total_transform);
iter++;
}
printf("Final Transform : \n");
// total_transform = glm::inverse(total_transform);
utilityCore::printMat4(total_transform);
auto finish = std::chrono::high_resolution_clock::now();
std::cout << "iterateGPU() took "
            << std::chrono::duration_cast<std::chrono::milliseconds>(finish - start).count()
<< " milliseconds\n";
return true;
}
void ICP::stepGPU(glm::mat4& total_transform) {
dim3 fullBlocksPerGrid((sizeTarget + blockSize - 1) / blockSize);
// find the closest point in the scene for each point in the target
glm::vec4 *dev_cor, *tar_c, *cor_c;
glm::mat3 *dev_W;
float *euclidean_error;
cudaMalloc((void**)&dev_cor, sizeTarget*sizeof(glm::vec4));
cudaMalloc((void**)&tar_c, sizeTarget*sizeof(glm::vec4));
cudaMalloc((void**)&cor_c, sizeTarget*sizeof(glm::vec4));
cudaMalloc((void**)&dev_W, sizeTarget * sizeof(glm::mat3));
cudaMalloc((void**)&euclidean_error, sizeTarget * sizeof(float));
cudaMemset(dev_W, 0, sizeTarget * sizeof(glm::mat3));
#if KD_TREE_SEARCH
findCorrespondenceKD << <fullBlocksPerGrid, blockSize >> >(sizeTarget, sizeScene, dev_cor, dev_pos, dev_kd);
#else
findCorrespondence << <fullBlocksPerGrid, blockSize >> >(sizeTarget, sizeScene, dev_cor, dev_pos);
#endif
cudaStreamSynchronize(cudaStreamPerThread);
// dev_cor contains correspondences of rendered points in observed cloud
// Calculate mean centered correspondenses
glm::vec3 mu_tar(0, 0, 0), mu_cor(0, 0, 0);
thrust::device_ptr<glm::vec4> ptr_target(&dev_pos[sizeScene]);
thrust::device_ptr<glm::vec4> ptr_scene(dev_pos);
thrust::device_ptr<glm::vec4> ptr_cor(dev_cor);
mu_tar = glm::vec3(thrust::reduce(thrust::cuda::par.on(cudaStreamPerThread), ptr_target, ptr_target + sizeTarget, glm::vec4(0, 0, 0, 0)));
mu_cor = glm::vec3(thrust::reduce(thrust::cuda::par.on(cudaStreamPerThread), ptr_cor, ptr_cor + sizeTarget, glm::vec4(0, 0, 0, 0)));
// get mean
mu_tar /= sizeTarget;
mu_cor /= sizeTarget;
// utilityCore::printVec3(mu_tar);
printf("Target mean (x,y,z) : %f,%f,%f\n", mu_tar.x, mu_tar.y, mu_tar.z);
printf("Corr mean (x,y,z) : %f,%f,%f\n", mu_cor.x, mu_cor.y, mu_cor.z);
cudaMemcpyAsync(tar_c, &dev_pos[sizeScene], sizeTarget*sizeof(glm::vec4), cudaMemcpyDeviceToDevice, cudaStreamPerThread);
cudaMemcpyAsync(cor_c, dev_cor, sizeTarget*sizeof(glm::vec4), cudaMemcpyDeviceToDevice, cudaStreamPerThread);
cudaStreamSynchronize(cudaStreamPerThread);
glm::vec3 r(0, 0, 0);
glm::vec3 s(1, 1, 1);
glm::mat4 center_tar = utilityCore::buildTransformationMatrix(-mu_tar, r, s);
glm::mat4 center_cor = utilityCore::buildTransformationMatrix(-mu_cor, r, s);
// utilityCore::printMat4(center_tar);
transformPoint << <fullBlocksPerGrid, blockSize >> >(sizeTarget, tar_c, center_tar);
checkCUDAErrorWithLine("mean centered transformation 1 failed!");
cudaStreamSynchronize(cudaStreamPerThread);
transformPoint << <fullBlocksPerGrid, blockSize >> >(sizeTarget, cor_c, center_cor);
checkCUDAErrorWithLine("mean centered transformation 2 failed!");
cudaStreamSynchronize(cudaStreamPerThread);
euclideanError << <fullBlocksPerGrid, blockSize >> >(sizeTarget, tar_c, cor_c, euclidean_error);
thrust::device_ptr<float> ptr_error(euclidean_error);
float total_error = thrust::reduce(thrust::cuda::par.on(cudaStreamPerThread), ptr_error, ptr_error + sizeTarget, (float) 0.0, thrust::plus<float>());
// for (int i = 0; i < sizeTarget; i++)
// {
// printf("Dist:%f\n", euclidean_error[i]);
// }
printf("Euclidean Error:%f\n", total_error);
checkCUDAErrorWithLine("euclideanError failed!");
cudaStreamSynchronize(cudaStreamPerThread);
// Calculate W
outerProduct << <fullBlocksPerGrid, blockSize >> >(sizeTarget, tar_c, cor_c, dev_W);
thrust::device_ptr<glm::mat3> ptr_W(dev_W);
glm::mat3 W = thrust::reduce(thrust::cuda::par.on(cudaStreamPerThread), ptr_W, ptr_W + sizeTarget, glm::mat3(0));
checkCUDAErrorWithLine("outer product failed!");
cudaStreamSynchronize(cudaStreamPerThread);
// calculate SVD of W
glm::mat3 U, S, V;
svd(W[0][0], W[0][1], W[0][2], W[1][0], W[1][1], W[1][2], W[2][0], W[2][1], W[2][2],
U[0][0], U[0][1], U[0][2], U[1][0], U[1][1], U[1][2], U[2][0], U[2][1], U[2][2],
S[0][0], S[0][1], S[0][2], S[1][0], S[1][1], S[1][2], S[2][0], S[2][1], S[2][2],
V[0][0], V[0][1], V[0][2], V[1][0], V[1][1], V[1][2], V[2][0], V[2][1], V[2][2]
);
glm::mat3 g_U(glm::vec3(U[0][0], U[1][0], U[2][0]), glm::vec3(U[0][1], U[1][1], U[2][1]), glm::vec3(U[0][2], U[1][2], U[2][2]));
glm::mat3 g_Vt(glm::vec3(V[0][0], V[0][1], V[0][2]), glm::vec3(V[1][0], V[1][1], V[1][2]), glm::vec3(V[2][0], V[2][1], V[2][2]));
// Get transformation from SVD
glm::mat3 R = g_U * g_Vt;
glm::vec3 t = glm::vec3(mu_cor) - R*glm::vec3(mu_tar);
// update target points
glm::mat4 transform = glm::translate(glm::mat4(), t) * glm::mat4(R);
transformPoint << <fullBlocksPerGrid, blockSize >> >(sizeTarget, &dev_pos[sizeScene], transform);
// total_transform *= transform;
total_transform = transform * total_transform;
checkCUDAErrorWithLine("transform failed!");
cudaStreamSynchronize(cudaStreamPerThread);
// utilityCore::printMat4(transform);
// std::cout << glm::to_string(transform) << std::endl;
// int i,j;
// for (j=0; j<4; j++){
// for (i=0; i<4; i++){
// printf("%f ",transform[i][j]);
// }
// printf("\n");
// }
// printf("\n");
// printf("\n");
cudaFree(dev_cor);
cudaFree(tar_c);
cudaFree(cor_c);
cudaFree(dev_W);
}
void ICP::checkConvergence(int thresh) {
dim3 fullBlocksPerGrid((numObjects + blockSize - 1) / blockSize);
}
void ICP::unitTest() {
std::vector<glm::vec4> test;
for (int i = 0; i < 16; i++)
test.push_back(glm::vec4((6 - 2 * i) % 3, -i % 4, i, i));
KDTree::Node *nd = new KDTree::Node[16];
KDTree::Create(test, nd);
printf("nodes: \n");
for (int i = 0; i < 16; i++)
printf(" %i: parent(%i), axis(%i), children(%i %i), val (%f %f %f)\n", i,
nd[i].parent, nd[i].axis, nd[i].left, nd[i].right, nd[i].value.x, nd[i].value.y, nd[i].value.z);
glm::vec4 a(.1, .2, .3, 1234);
glm::vec3 b = glm::vec3(a);
//printf("\nb: %f %f %f (%f)\n", b.x, b.y, b.z, a.w);
} |
ca077204280d005f55a3709a6b062e250569c813.hip | // !!! This is a file automatically generated by hipify!!!
// clang-format off
/************************************************************************************\
* *
* Copyright 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR") (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
// clang-format on
#include <hip/hip_runtime.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "../../graph_parser/parse.h"
#include "../../graph_parser/util.h"
#include "kernel.hip"
// Iteration count
#define ITER 20
void print_vectorf(int *vector, int num);
int main(int argc, char **argv) {
char *tmpchar;
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
hipError_t err = hipSuccess;
if (argc == 3) {
tmpchar = argv[1]; // Graph inputfile
file_format = atoi(argv[2]); // File format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
// Allocate the csr structure
csr_array *csr;
// Parse graph files into csr structure
if (file_format == 1) {
// Metis
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
// Dimacs9
csr = parseCOO(tmpchar, &num_nodes, &num_edges, 1);
} else if (file_format == 2) {
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
} else {
printf("reserve for future");
exit(1);
}
// Allocate rank_array
int *rank_array = (int *)malloc(num_nodes * sizeof(int));
if (!rank_array) {
fprintf(stderr, "rank array not allocated successfully\n");
return -1;
}
int *row_d;
int *col_d;
int *inrow_d;
int *incol_d;
int *index_d;
// Create device-side buffers for the graph
err = hipMalloc(&row_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc row_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&col_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc col_d (size:%d) => %s\n", num_edges,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&inrow_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc inrow_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMalloc(&incol_d, num_edges * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc incol_d (size:%d) => %s\n",
num_edges, hipGetErrorString(err));
return -1;
}
// Create buffers for index
err = hipMalloc(&index_d, num_nodes * sizeof(int));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc index_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
double timer1 = gettime();
// Copy the data to the device-side buffers
err = hipMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR:#endif hipMemcpy row_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy col_d (size:%d) => %s\n", num_nodes,
hipGetErrorString(err));
return -1;
}
err = hipMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR:#endif hipMemcpy inrow_d (size:%d) => %s\n",
num_nodes, hipGetErrorString(err));
return -1;
}
err = hipMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
hipMemcpyHostToDevice);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy incol_d (size:%d) => %s\n",
                num_edges, hipGetErrorString(err));
return -1;
}
// Set up work dimensions
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
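    // Editorial note (not part of the original source): num_blocks is the usual ceiling
    // division, e.g. num_nodes == 1000 with block_size == 256 gives (1000 + 255) / 256 == 4
    // blocks, i.e. 1024 threads; the extra threads are expected to be filtered out by bounds
    // checks inside the kernels (which live in kernel.hip and are not shown here, so this is
    // an assumption).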
hipDeviceSetLimit(hipLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
double timer3 = gettime();
VirtVertex<int, int> **vertex;
GraphChiContext *context;
err = hipMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int> *));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc vertex (size:%d) => %s\n", num_edges,
hipGetErrorString(err));
return -1;
}
err = hipMalloc(&context, sizeof(GraphChiContext));
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMalloc context (size:%d) => %s\n",
num_edges, hipGetErrorString(err));
return -1;
}
printf("Start initCtx\n");
hipLaunchKernelGGL(( initContext), dim3(1), dim3(1), 0, 0, context, num_nodes, num_edges);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initCtx failed (%s)\n",
hipGetErrorString(err));
return -1;
}
printf("Start initObj\n");
hipLaunchKernelGGL(( initObject), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d, inrow_d,
incol_d);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initObject failed (%s)\n",
hipGetErrorString(err));
return -1;
}
printf("Start initOutEdge\n");
hipLaunchKernelGGL(( initOutEdge), dim3(grid), dim3(threads), 0, 0, vertex, context, row_d, col_d);
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
hipGetErrorString(err));
return -1;
}
    // Run BFS for a fixed number of iterations. TODO: convergence determination
for (int i = 0; i < ITER; i++) {
printf("Start BFS\n");
hipLaunchKernelGGL(( BFS), dim3(grid), dim3(threads), 0, 0, vertex, context, i);
printf("Finish BFS\n");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipLaunch failed (%s)\n",
hipGetErrorString(err));
return -1;
}
}
hipDeviceSynchronize();
double timer4 = gettime();
printf("Start Copyback\n");
hipLaunchKernelGGL(( copyBack), dim3(grid), dim3(threads), 0, 0, vertex, context, index_d);
printf("End Copyback\n");
hipDeviceSynchronize();
err = hipGetLastError();
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipLaunch failed (%s)\n",
hipGetErrorString(err));
return -1;
}
// Copy the rank buffer back
err = hipMemcpy(rank_array, index_d, num_nodes * sizeof(int),
hipMemcpyDeviceToHost);
if (err != hipSuccess) {
fprintf(stderr, "ERROR: hipMemcpy() failed (%s)\n",
hipGetErrorString(err));
return -1;
}
double timer2 = gettime();
// Report timing characteristics
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Print rank array
print_vectorf(rank_array, num_nodes);
#endif
// Free the host-side arrays
free(rank_array);
csr->freeArrays();
free(csr);
// Free the device buffers
hipFree(row_d);
hipFree(col_d);
hipFree(inrow_d);
hipFree(incol_d);
hipFree(index_d);
return 0;
}
void print_vectorf(int *vector, int num) {
FILE *fp = fopen("result.out", "w");
if (!fp) {
printf("ERROR: unable to open result.txt\n");
}
for (int i = 0; i < num; i++) {
fprintf(fp, "%d\n", vector[i]);
}
fclose(fp);
}
| ca077204280d005f55a3709a6b062e250569c813.cu | // clang-format off
/************************************************************************************\
* *
* Copyright � 2014 Advanced Micro Devices, Inc. *
* Copyright (c) 2015 Mark D. Hill and David A. Wood *
* All rights reserved. *
* *
* Redistribution and use in source and binary forms, with or without *
* modification, are permitted provided that the following are met: *
* *
* You must reproduce the above copyright notice. *
* *
* Neither the name of the copyright holder nor the names of its contributors *
* may be used to endorse or promote products derived from this software *
* without specific, prior, written permission from at least the copyright holder. *
* *
* You must include the following terms in your license and/or other materials *
* provided with the software. *
* *
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" *
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE *
* IMPLIED WARRANTIES OF MERCHANTABILITY, NON-INFRINGEMENT, AND FITNESS FOR A *
* PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER *
* OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, *
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT *
* OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS *
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN *
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING *
* IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY *
* OF SUCH DAMAGE. *
* *
* Without limiting the foregoing, the software may implement third party *
* technologies for which you must obtain licenses from parties other than AMD. *
* You agree that AMD has not obtained or conveyed to you, and that you shall *
* be responsible for obtaining the rights to use and/or distribute the applicable *
* underlying intellectual property rights related to the third party technologies. *
* These third party technologies are not licensed hereunder. *
* *
* If you use the software (in whole or in part), you shall adhere to all *
* applicable U.S., European, and other export laws, including but not limited to *
* the U.S. Export Administration Regulations ("EAR"�) (15 C.F.R Sections 730-774), *
* and E.U. Council Regulation (EC) No 428/2009 of 5 May 2009. Further, pursuant *
* to Section 740.6 of the EAR, you hereby certify that, except pursuant to a *
* license granted by the United States Department of Commerce Bureau of Industry *
* and Security or as otherwise permitted pursuant to a License Exception under *
* the U.S. Export Administration Regulations ("EAR"), you will not (1) export, *
* re-export or release to a national of a country in Country Groups D:1, E:1 or *
* E:2 any restricted technology, software, or source code you receive hereunder, *
* or (2) export to Country Groups D:1, E:1 or E:2 the direct product of such *
* technology or software, if such foreign produced direct product is subject to *
* national security controls as identified on the Commerce Control List (currently *
* found in Supplement 1 to Part 774 of EAR). For the most current Country Group *
* listings, or for additional information about the EAR or your obligations under *
* those regulations, please refer to the U.S. Bureau of Industry and Security's *
* website at http://www.bis.doc.gov/. *
* *
\************************************************************************************/
// clang-format on
#include <cuda.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/time.h>
#include "../../graph_parser/parse.h"
#include "../../graph_parser/util.h"
#include "kernel.cu"
// Iteration count
#define ITER 20
void print_vectorf(int *vector, int num);
int main(int argc, char **argv) {
char *tmpchar;
int num_nodes;
int num_edges;
int file_format = 1;
bool directed = 0;
cudaError_t err = cudaSuccess;
if (argc == 3) {
tmpchar = argv[1]; // Graph inputfile
file_format = atoi(argv[2]); // File format
} else {
fprintf(stderr, "You did something wrong!\n");
exit(1);
}
// Allocate the csr structure
csr_array *csr;
// Parse graph files into csr structure
if (file_format == 1) {
// Metis
csr = parseMetis(tmpchar, &num_nodes, &num_edges, directed);
} else if (file_format == 0) {
// Dimacs9
csr = parseCOO(tmpchar, &num_nodes, &num_edges, 1);
} else if (file_format == 2) {
// Matrix market
csr = parseMM(tmpchar, &num_nodes, &num_edges, directed, 0);
} else {
printf("reserve for future");
exit(1);
}
// Allocate rank_array
int *rank_array = (int *)malloc(num_nodes * sizeof(int));
if (!rank_array) {
fprintf(stderr, "rank array not allocated successfully\n");
return -1;
}
int *row_d;
int *col_d;
int *inrow_d;
int *incol_d;
int *index_d;
// Create device-side buffers for the graph
err = cudaMalloc(&row_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc row_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&col_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc col_d (size:%d) => %s\n", num_edges,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&inrow_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc inrow_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&incol_d, num_edges * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc incol_d (size:%d) => %s\n",
num_edges, cudaGetErrorString(err));
return -1;
}
// Create buffers for index
err = cudaMalloc(&index_d, num_nodes * sizeof(int));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc index_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
double timer1 = gettime();
// Copy the data to the device-side buffers
err = cudaMemcpy(row_d, csr->row_array, num_nodes * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR:#endif cudaMemcpy row_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(col_d, csr->col_array, num_edges * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy col_d (size:%d) => %s\n", num_nodes,
cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(inrow_d, csr->inrow_array, num_nodes * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR:#endif cudaMemcpy inrow_d (size:%d) => %s\n",
num_nodes, cudaGetErrorString(err));
return -1;
}
err = cudaMemcpy(incol_d, csr->incol_array, num_edges * sizeof(int),
cudaMemcpyHostToDevice);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy incol_d (size:%d) => %s\n",
                num_edges, cudaGetErrorString(err));
return -1;
}
// Set up work dimensions
int block_size = 256;
int num_blocks = (num_nodes + block_size - 1) / block_size;
dim3 threads(block_size, 1, 1);
dim3 grid(num_blocks, 1, 1);
cudaDeviceSetLimit(cudaLimitMallocHeapSize, 4ULL * 1024 * 1024 * 1024);
double timer3 = gettime();
VirtVertex<int, int> **vertex;
GraphChiContext *context;
err = cudaMalloc(&vertex, num_nodes * sizeof(ChiVertex<int, int> *));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc vertex (size:%d) => %s\n", num_edges,
cudaGetErrorString(err));
return -1;
}
err = cudaMalloc(&context, sizeof(GraphChiContext));
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMalloc context (size:%d) => %s\n",
num_edges, cudaGetErrorString(err));
return -1;
}
printf("Start initCtx\n");
initContext<<<1, 1>>>(context, num_nodes, num_edges);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initCtx failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
printf("Start initObj\n");
initObject<<<grid, threads>>>(vertex, context, row_d, col_d, inrow_d,
incol_d);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initObject failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
printf("Start initOutEdge\n");
initOutEdge<<<grid, threads>>>(vertex, context, row_d, col_d);
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: initOutEdge failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
    // Run BFS for a fixed number of iterations. TODO: determine convergence instead
for (int i = 0; i < ITER; i++) {
printf("Start BFS\n");
BFS<<<grid, threads>>>(vertex, context, i);
printf("Finish BFS\n");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
}
cudaDeviceSynchronize();
double timer4 = gettime();
printf("Start Copyback\n");
copyBack<<<grid, threads>>>(vertex, context, index_d);
printf("End Copyback\n");
cudaDeviceSynchronize();
err = cudaGetLastError();
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaLaunch failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
// Copy the rank buffer back
err = cudaMemcpy(rank_array, index_d, num_nodes * sizeof(int),
cudaMemcpyDeviceToHost);
if (err != cudaSuccess) {
fprintf(stderr, "ERROR: cudaMemcpy() failed (%s)\n",
cudaGetErrorString(err));
return -1;
}
double timer2 = gettime();
// Report timing characteristics
printf("kernel time = %lf ms\n", (timer4 - timer3) * 1000);
printf("kernel + memcpy time = %lf ms\n", (timer2 - timer1) * 1000);
#if 1
// Print rank array
print_vectorf(rank_array, num_nodes);
#endif
// Free the host-side arrays
free(rank_array);
csr->freeArrays();
free(csr);
// Free the device buffers
cudaFree(row_d);
cudaFree(col_d);
cudaFree(inrow_d);
cudaFree(incol_d);
cudaFree(index_d);
return 0;
}
void print_vectorf(int *vector, int num) {
FILE *fp = fopen("result.out", "w");
if (!fp) {
printf("ERROR: unable to open result.txt\n");
}
for (int i = 0; i < num; i++) {
fprintf(fp, "%d\n", vector[i]);
}
fclose(fp);
}
|
9b2a35f8ff8c0601443bed8ffe8fdb03c8e733f1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity HW5
Histogramming for Speed
The goal of this assignment is compute a histogram
as fast as possible. We have simplified the problem as much as
possible to allow you to focus solely on the histogramming algorithm.
The input values that you need to histogram are already the exact
bins that need to be updated. This is unlike in HW3 where you needed
to compute the range of the data and then do:
bin = (val - valMin) / valRange to determine the bin.
Here the bin is just:
bin = val
so the serial histogram calculation looks like:
for (i = 0; i < numElems; ++i)
histo[val[i]]++;
That's it! Your job is to make it run as fast as possible!
The values are normally distributed - you may take
advantage of this fact in your implementation.
*/
#include "utils.h"
__global__ void yourHisto(const unsigned int *const vals, // INPUT
                          unsigned int *const histo,      // OUTPUT
int numVals) {
// TODO fill in this kernel to calculate the histogram
// as quickly as possible
// Although we provide only one kernel skeleton,
// feel free to use more if it will help you
// write faster code
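  // A minimal reference sketch (an assumption, not the intended fast
  // solution): a plain global-memory atomicAdd histogram. It assumes every
  // input value is already a valid bin index, as described in the header
  // comment. A faster version would privatize per-block histograms in
  // shared memory before merging them.
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; i < numVals; i += stride)
    atomicAdd(&histo[vals[i]], 1u);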
}
void computeHistogram(const unsigned int *const d_vals, // INPUT
unsigned int *const d_histo, // OUTPUT
const unsigned int numBins, const unsigned int numElems) {
// TODO Launch the yourHisto kernel
// if you want to use/launch more than one kernel,
// feel free
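  // A minimal launch sketch (an assumption, not the tuned solution): clear
  // the bins, then run the simple atomicAdd kernel above.
  checkCudaErrors(hipMemset(d_histo, 0, numBins * sizeof(unsigned int)));
  const int blockSize = 256;
  const int numBlocks = (numElems + blockSize - 1) / blockSize;
  hipLaunchKernelGGL(( yourHisto), dim3(numBlocks), dim3(blockSize), 0, 0, d_vals, d_histo, numElems);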
hipDeviceSynchronize();
checkCudaErrors(hipGetLastError());
}
| 9b2a35f8ff8c0601443bed8ffe8fdb03c8e733f1.cu | /* Udacity HW5
Histogramming for Speed
The goal of this assignment is compute a histogram
as fast as possible. We have simplified the problem as much as
possible to allow you to focus solely on the histogramming algorithm.
The input values that you need to histogram are already the exact
bins that need to be updated. This is unlike in HW3 where you needed
to compute the range of the data and then do:
bin = (val - valMin) / valRange to determine the bin.
Here the bin is just:
bin = val
so the serial histogram calculation looks like:
for (i = 0; i < numElems; ++i)
histo[val[i]]++;
That's it! Your job is to make it run as fast as possible!
The values are normally distributed - you may take
advantage of this fact in your implementation.
*/
#include "utils.h"
__global__ void yourHisto(const unsigned int *const vals, // INPUT
                          unsigned int *const histo,      // OUTPUT
int numVals) {
// TODO fill in this kernel to calculate the histogram
// as quickly as possible
// Although we provide only one kernel skeleton,
// feel free to use more if it will help you
// write faster code
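  // A minimal reference sketch (an assumption, not the intended fast
  // solution): a plain global-memory atomicAdd histogram. It assumes every
  // input value is already a valid bin index, as described in the header
  // comment. A faster version would privatize per-block histograms in
  // shared memory before merging them.
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  int stride = blockDim.x * gridDim.x;
  for (; i < numVals; i += stride)
    atomicAdd(&histo[vals[i]], 1u);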
}
void computeHistogram(const unsigned int *const d_vals, // INPUT
unsigned int *const d_histo, // OUTPUT
const unsigned int numBins, const unsigned int numElems) {
// TODO Launch the yourHisto kernel
// if you want to use/launch more than one kernel,
// feel free
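  // A minimal launch sketch (an assumption, not the tuned solution): clear
  // the bins, then run the simple atomicAdd kernel above.
  checkCudaErrors(cudaMemset(d_histo, 0, numBins * sizeof(unsigned int)));
  const int blockSize = 256;
  const int numBlocks = (numElems + blockSize - 1) / blockSize;
  yourHisto<<<numBlocks, blockSize>>>(d_vals, d_histo, numElems);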
cudaDeviceSynchronize();
checkCudaErrors(cudaGetLastError());
}
|
94188c45683b24e68a703e8eeeb44fb9819a0d6a.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <hip/hip_runtime_api.h>
//#define N 1573700//1310720//262144//131072//262144//83886080
//Number of threads per block
#define BLOCK_SIZE 32//1//1024//95536
#define nThreadsPerBlock 128//420//128//420 or 416
#define NFinal (nThreadsPerBlock * 5)
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
hipError_t checkCuda(hipError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
#endif
return result;
}
__device__ int* memoria(int *vetDados, int ElemPorBlocos, int qtdProces){
__shared__ int vetComp[4096];
int auxGrupoDe32 = (qtdProces * 32);
	int comecoBloco = blockIdx.x * ElemPorBlocos; // where each block starts
int qtdElemThread = ElemPorBlocos / blockDim.x;
int idCompartilhada = threadIdx.x;
int idGlobal = comecoBloco + ((threadIdx.x / 32) * qtdElemThread) + (threadIdx.x - ((threadIdx.x / 32) * 32)) + auxGrupoDe32;
int i;
for(i = 0; i < 4096; i += blockDim.x){
vetComp[idCompartilhada] = vetDados[idGlobal];
idCompartilhada += blockDim.x;
idGlobal += (qtdElemThread * 4);
}
return vetComp;
}
__global__ void subSeqMax(int *vet, int *vetFinal, int ElemPorThread, int n){
	__shared__ int *p; // pointer to the shared-memory vector
// M t_m S suf
	int ini_M, fim_M, t_M, ini_S, fim_S, suf; //algorithm variables
t_M = suf = 0;
int comecoThread = (threadIdx.x * 32);
int j;
for(j = 0; j < (n / 4096); j++){ // Quantas vezes terei que processa at chegar no n/blocos sendo que o vet compartilhado de 4096
p = memoria(vet,n,j);
__syncthreads();
if(threadIdx.x < 128){
ini_M = fim_M = ini_S = fim_S = comecoThread -1;
int i;
for(i = comecoThread -1; i < comecoThread + 32; i++){
if(i == fim_M){
fim_S++;
suf += p[i+1];
if(suf < 0){
suf = 0;
fim_S = -1;
}
					ini_S = fim_S == 0 ? 0 : ini_S; // start of S
if(p[i+1] > 0){
fim_M++;
t_M += p[i+1];
						ini_M = fim_M == 0 ? 0 : ini_M; // start of M
}
}
else{
if(suf + p[i+1] > t_M){
fim_S++;
if(ini_M == -1){
fim_S = ini_S = i +1;
}
suf += p[i+1];
ini_M = ini_S;
fim_M = fim_S;
t_M = suf;
}
else{
if(suf + p[i+1] > 0){
fim_S++;
if(suf == 0){
ini_S = fim_S = i+1;
}
suf += p[i+1];
}
else{
ini_S = fim_S = i + 2;
suf = 0;
}
}//else
}//else
}// 1* for
}// If 128
}// 2* for
if(threadIdx.x < 128){
int idThread = blockIdx.x * blockDim.x + threadIdx.x;
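		// Each thread writes a 5-tuple into vetFinal:
		// [0] best prefix sum, [1] remainder of the prefix (its negative part),
		// [2] best subsequence sum M, [3] best suffix sum,
		// [4] remainder of the suffix (its negative part).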
vetFinal[(idThread * 5)] = vetFinal[(idThread * 5)+1] = vetFinal[(idThread * 5)+2] = vetFinal[(idThread * 5)+3] =
vetFinal[(idThread * 5)+4] = -1;
		//Store M
vetFinal[(idThread * 5)+2] = t_M;
		//Compute the prefix
int pref_Max, soma_Pref;
soma_Pref = 0;
pref_Max = 0;
int i;
if(ini_M > comecoThread -1){
for(i = 0; i < ini_M; i++){
soma_Pref += p[i];
if(soma_Pref > pref_Max){
pref_Max = soma_Pref;
}
}
if(pref_Max == 0){
vetFinal[(idThread * 5)] = 0;
vetFinal[(idThread * 5)+1] = soma_Pref;
}
else{
				vetFinal[(idThread * 5)] = pref_Max; //Prefix
				vetFinal[(idThread * 5)+1] = soma_Pref - pref_Max; //Negative numbers
}
}
		//Compute the suffix
int suf_Max, soma_Suf;
soma_Suf = suf_Max = 0;
if(fim_M < comecoThread + 32){
for(i = (comecoThread + 32)-1; i > fim_M; i--){
soma_Suf += p[i];
if(soma_Suf > suf_Max){
suf_Max = soma_Suf;
}
}
if(suf_Max == 0){
				vetFinal[(idThread * 5)+3] = 0; //Empty suffix
				vetFinal[(idThread * 5)+4] = suf_Max;//Negative numbers
}
else{
				vetFinal[(idThread * 5)+3] = suf_Max; //Suffix
				vetFinal[(idThread * 5)+4] = soma_Suf - suf_Max;//Negative numbers
}
}
}//if 128
}
void subSeqMaxFinal(int *vet, int n){
// M t_m S suf
int ini_M, fim_M, t_M, ini_S, fim_S, suf;
ini_M = fim_M = ini_S = fim_S = -1;
t_M = suf = 0;
int i;
for(i = -1; i < n-1; i++){
if(i == fim_M){
fim_S++;
suf += vet[i+1];
if(suf < 0){
suf = 0;
fim_S = -1;
}
			ini_S = fim_S == 0 ? 0 : ini_S; // start of S
if(vet[i+1] > 0){
fim_M++;
t_M += vet[i+1];
				ini_M = fim_M == 0 ? 0 : ini_M; // start of M
}
}
else{
if(suf + vet[i+1] > t_M){
fim_S++;
if(ini_M == -1){
fim_S = ini_S = i +1;
}
suf += vet[i+1];
ini_M = ini_S;
fim_M = fim_S;
t_M = suf;
}
else{
if(suf + vet[i+1] > 0){
fim_S++;
if(suf == 0){
ini_S = fim_S = i+1;
}
suf += vet[i+1];
}
else{
ini_S = fim_S = i + 2;
suf = 0;
}
}
}
}
printf(" \n\n A sub Sequencia deu %d \n\n", t_M);
}
int main(int argc, char** argv){
	float elapsedTime; // timing
	hipEvent_t start, stop; // timing
	//Aux device vectors
int *vet_d; int *vetFinal_d;
if (argc != 3) {
fprintf(stderr, "Syntax: %s <Vector size Width> <device id>\n", argv[0]);
return EXIT_FAILURE;
}
//Vet
int N = atoi(argv[1]);
	int *vet_h = (int *) malloc(sizeof(int) * N); // Data vector
	int *vetFinal_h = (int *) malloc(sizeof(int) * NFinal);// Result vector
int i;
	for(i = 0; i < N; i++){ // Fill the data
vet_h[i] = -1;
}
	for(i = 0; i < NFinal; i++){ // Fill the data
vetFinal_h[i] = -1;
}
vet_h[131] = 954;
vet_h[132] = 10;
int devId = atoi(argv[2]);
checkCuda( hipSetDevice(devId) );
hipDeviceReset();
hipDeviceProp_t prop;
checkCuda( hipGetDeviceProperties(&prop, devId) );
printf("Device: %s\n", prop.name);
	//Allocate memory on the device
	hipMalloc((void**)&vet_d, N * sizeof(int)); //Data vector
	hipMalloc((void**)&vetFinal_d, NFinal * sizeof(int));// Result vector
	//Copy the data vector to the device
hipMemcpy(vet_d, vet_h, N * sizeof(int), hipMemcpyHostToDevice);
int ElemPorBlocos = (N / BLOCK_SIZE);
int ElemPorThread = (ElemPorBlocos / nThreadsPerBlock);
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
hipProfilerStart();
hipLaunchKernelGGL(( subSeqMax), dim3(BLOCK_SIZE), dim3(nThreadsPerBlock), 0, 0, vet_d, vetFinal_d, ElemPorThread,N / BLOCK_SIZE);
hipProfilerStop();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsedTime, start, stop);
printf("Primeiro kernel (ms) = \%f\n\n", elapsedTime);
	hipMemcpy(vetFinal_h, vetFinal_d, NFinal * sizeof(int), hipMemcpyDeviceToHost); //Final answer
for(i = 0; i < 4096; i++){
if(vetFinal_h[i] != 0 && vetFinal_h[i] != -1 )
printf("%d ", vetFinal_h[i]);
}
printf("\n\n");
hipFree(vetFinal_d);
hipFree(vet_d);
subSeqMaxFinal(vetFinal_h, NFinal);
return 0;
}
| 94188c45683b24e68a703e8eeeb44fb9819a0d6a.cu | #include <stdio.h>
#include <stdlib.h>
#include <assert.h>
#include <cuda_profiler_api.h>
//#define N 1573700//1310720//262144//131072//262144//83886080
//Number of threads per block
#define BLOCK_SIZE 32//1//1024//95536
#define nThreadsPerBlock 128//420//128//420 or 416
#define NFinal (nThreadsPerBlock * 5)
// Convenience function for checking CUDA runtime API results
// can be wrapped around any runtime API call. No-op in release builds.
inline
cudaError_t checkCuda(cudaError_t result)
{
#if defined(DEBUG) || defined(_DEBUG)
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
#endif
return result;
}
__device__ int* memoria(int *vetDados, int ElemPorBlocos, int qtdProces){
__shared__ int vetComp[4096];
int auxGrupoDe32 = (qtdProces * 32);
	int comecoBloco = blockIdx.x * ElemPorBlocos; // where each block starts
int qtdElemThread = ElemPorBlocos / blockDim.x;
int idCompartilhada = threadIdx.x;
int idGlobal = comecoBloco + ((threadIdx.x / 32) * qtdElemThread) + (threadIdx.x - ((threadIdx.x / 32) * 32)) + auxGrupoDe32;
int i;
for(i = 0; i < 4096; i += blockDim.x){
vetComp[idCompartilhada] = vetDados[idGlobal];
idCompartilhada += blockDim.x;
idGlobal += (qtdElemThread * 4);
}
return vetComp;
}
__global__ void subSeqMax(int *vet, int *vetFinal, int ElemPorThread, int n){
	__shared__ int *p; // pointer to the shared-memory vector
// M t_m S suf
	int ini_M, fim_M, t_M, ini_S, fim_S, suf; //algorithm variables
t_M = suf = 0;
int comecoThread = (threadIdx.x * 32);
int j;
for(j = 0; j < (n / 4096); j++){ // Quantas vezes terei que processa até chegar no n/blocos sendo que o vet compartilhado é de 4096
p = memoria(vet,n,j);
__syncthreads();
if(threadIdx.x < 128){
ini_M = fim_M = ini_S = fim_S = comecoThread -1;
int i;
for(i = comecoThread -1; i < comecoThread + 32; i++){
if(i == fim_M){
fim_S++;
suf += p[i+1];
if(suf < 0){
suf = 0;
fim_S = -1;
}
					ini_S = fim_S == 0 ? 0 : ini_S; // start of S
if(p[i+1] > 0){
fim_M++;
t_M += p[i+1];
						ini_M = fim_M == 0 ? 0 : ini_M; // start of M
}
}
else{
if(suf + p[i+1] > t_M){
fim_S++;
if(ini_M == -1){
fim_S = ini_S = i +1;
}
suf += p[i+1];
ini_M = ini_S;
fim_M = fim_S;
t_M = suf;
}
else{
if(suf + p[i+1] > 0){
fim_S++;
if(suf == 0){
ini_S = fim_S = i+1;
}
suf += p[i+1];
}
else{
ini_S = fim_S = i + 2;
suf = 0;
}
}//else
}//else
}// 1* for
}// If 128
}// 2* for
if(threadIdx.x < 128){
int idThread = blockIdx.x * blockDim.x + threadIdx.x;
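		// Each thread writes a 5-tuple into vetFinal:
		// [0] best prefix sum, [1] remainder of the prefix (its negative part),
		// [2] best subsequence sum M, [3] best suffix sum,
		// [4] remainder of the suffix (its negative part).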
vetFinal[(idThread * 5)] = vetFinal[(idThread * 5)+1] = vetFinal[(idThread * 5)+2] = vetFinal[(idThread * 5)+3] =
vetFinal[(idThread * 5)+4] = -1;
		//Store M
vetFinal[(idThread * 5)+2] = t_M;
		//Compute the prefix
int pref_Max, soma_Pref;
soma_Pref = 0;
pref_Max = 0;
int i;
if(ini_M > comecoThread -1){
for(i = 0; i < ini_M; i++){
soma_Pref += p[i];
if(soma_Pref > pref_Max){
pref_Max = soma_Pref;
}
}
if(pref_Max == 0){
vetFinal[(idThread * 5)] = 0;
vetFinal[(idThread * 5)+1] = soma_Pref;
}
else{
				vetFinal[(idThread * 5)] = pref_Max; //Prefix
				vetFinal[(idThread * 5)+1] = soma_Pref - pref_Max; //Negative numbers
}
}
		//Compute the suffix
int suf_Max, soma_Suf;
soma_Suf = suf_Max = 0;
if(fim_M < comecoThread + 32){
for(i = (comecoThread + 32)-1; i > fim_M; i--){
soma_Suf += p[i];
if(soma_Suf > suf_Max){
suf_Max = soma_Suf;
}
}
if(suf_Max == 0){
				vetFinal[(idThread * 5)+3] = 0; //Empty suffix
				vetFinal[(idThread * 5)+4] = suf_Max;//Negative numbers
}
else{
				vetFinal[(idThread * 5)+3] = suf_Max; //Suffix
				vetFinal[(idThread * 5)+4] = soma_Suf - suf_Max;//Negative numbers
}
}
}//if 128
}
void subSeqMaxFinal(int *vet, int n){
// M t_m S suf
int ini_M, fim_M, t_M, ini_S, fim_S, suf;
ini_M = fim_M = ini_S = fim_S = -1;
t_M = suf = 0;
int i;
for(i = -1; i < n-1; i++){
if(i == fim_M){
fim_S++;
suf += vet[i+1];
if(suf < 0){
suf = 0;
fim_S = -1;
}
			ini_S = fim_S == 0 ? 0 : ini_S; // start of S
if(vet[i+1] > 0){
fim_M++;
t_M += vet[i+1];
				ini_M = fim_M == 0 ? 0 : ini_M; // start of M
}
}
else{
if(suf + vet[i+1] > t_M){
fim_S++;
if(ini_M == -1){
fim_S = ini_S = i +1;
}
suf += vet[i+1];
ini_M = ini_S;
fim_M = fim_S;
t_M = suf;
}
else{
if(suf + vet[i+1] > 0){
fim_S++;
if(suf == 0){
ini_S = fim_S = i+1;
}
suf += vet[i+1];
}
else{
ini_S = fim_S = i + 2;
suf = 0;
}
}
}
}
printf(" \n\n A sub Sequencia deu %d \n\n", t_M);
}
int main(int argc, char** argv){
	float elapsedTime; // timing
	cudaEvent_t start, stop; // timing
	//Aux device vectors
int *vet_d; int *vetFinal_d;
if (argc != 3) {
fprintf(stderr, "Syntax: %s <Vector size Width> <device id>\n", argv[0]);
return EXIT_FAILURE;
}
//Vet
int N = atoi(argv[1]);
	int *vet_h = (int *) malloc(sizeof(int) * N); // Data vector
	int *vetFinal_h = (int *) malloc(sizeof(int) * NFinal);// Result vector
int i;
	for(i = 0; i < N; i++){ // Fill the data
vet_h[i] = -1;
}
	for(i = 0; i < NFinal; i++){ // Fill the data
vetFinal_h[i] = -1;
}
vet_h[131] = 954;
vet_h[132] = 10;
int devId = atoi(argv[2]);
checkCuda( cudaSetDevice(devId) );
cudaDeviceReset();
cudaDeviceProp prop;
checkCuda( cudaGetDeviceProperties(&prop, devId) );
printf("Device: %s\n", prop.name);
	//Allocate memory on the device
	cudaMalloc((void**)&vet_d, N * sizeof(int)); //Data vector
	cudaMalloc((void**)&vetFinal_d, NFinal * sizeof(int));// Result vector
	//Copy the data vector to the device
cudaMemcpy(vet_d, vet_h, N * sizeof(int), cudaMemcpyHostToDevice);
int ElemPorBlocos = (N / BLOCK_SIZE);
int ElemPorThread = (ElemPorBlocos / nThreadsPerBlock);
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
cudaProfilerStart();
subSeqMax<<<BLOCK_SIZE, nThreadsPerBlock>>>(vet_d, vetFinal_d, ElemPorThread,N / BLOCK_SIZE);
cudaProfilerStop();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("Primeiro kernel (ms) = \%f\n\n", elapsedTime);
	cudaMemcpy(vetFinal_h, vetFinal_d, NFinal * sizeof(int), cudaMemcpyDeviceToHost); //Final answer
for(i = 0; i < 4096; i++){
if(vetFinal_h[i] != 0 && vetFinal_h[i] != -1 )
printf("%d ", vetFinal_h[i]);
}
printf("\n\n");
cudaFree(vetFinal_d);
cudaFree(vet_d);
subSeqMaxFinal(vetFinal_h, NFinal);
return 0;
}
|
414f6d050fa5834feafcc645715ced65ee156fb6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <ctime>
#include <iostream>
#include <math.h>
#include <cstdlib>
#include "hip/hip_runtime.h"
#include "CPU_StreamCompaction.h"
#include "device_launch_parameters.h"
using namespace std;
const int n_input=1000000;
const int iters=1000;
const int threadsPerBlock=512;
#define CPU_SCAN 1
#define GPU_NAIVE 1
#define GPU_SHARED_NAIVE 0
#define GPU_SHARED_LG 1
#define CPU_SCATTER 1
__global__ void scan_GPU_naive(int *input,int *output,int n)
{
int idx=blockIdx.x*blockDim.x+threadIdx.x;
if(idx<n && idx>0)
output[idx]=input[idx-1];
__syncthreads();
for(int d=1;d<n;d*=2)
{
if(idx>=d && idx<n)
output[idx]=output[idx]+output[idx-d];
__syncthreads();
}
}
//modified from NVIDIA prefix sum slides
__global__ void scan_GPU_shared_naive(int *input, int *output, int n)
{
extern __shared__ int sdata[];
int idx=threadIdx.x;
int out=0,in=1;
if(idx<n)
{
sdata[idx]=(idx>0)?input[idx-1]:0;
sdata[n+idx]=0;
}
__syncthreads();
for(int d=1;d<n;d*=2)
{
out=1-out;
in=1-out;
__syncthreads();
if(idx>=d && idx<n)
sdata[out*n+idx]=sdata[in*n+idx]+sdata[in*n+idx-d];
else
sdata[out*n+idx]=sdata[in*n+idx];
}
output[idx]=sdata[out*n+idx];
}
__global__ void scan_GPU_shared_LG(int *input,int *output,int *aux,int n)
{
extern __shared__ int sdata[];
int idx=blockDim.x*blockIdx.x+threadIdx.x;
int out=0,in=1;
if(idx<n)
{
sdata[threadIdx.x]=input[idx];
sdata[n+threadIdx.x]=0;
}
for(int d=1;d<n;d*=2)
{
out=1-out;
in=1-out;
if(threadIdx.x>=d && idx<n)
sdata[out*n+threadIdx.x]=sdata[in*n+threadIdx.x]+sdata[in*n+threadIdx.x-d];
else
sdata[out*n+threadIdx.x]=sdata[in*n+threadIdx.x];
__syncthreads();
}
output[idx]=sdata[out*n+threadIdx.x];
if(blockIdx.x<n_input/threadsPerBlock)
aux[blockIdx.x]=sdata[out*n+blockDim.x-1];
}
__global__ void scan_GPU_shared_LG_add(int *input,int *output,int *aux,int n)
{
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(idx<n)
{
if(blockIdx.x>0)
output[idx]=output[idx]-input[idx]+aux[blockIdx.x-1];
else
output[idx]=output[idx]-input[idx];
}
}
void scan_GPU_LG(int *input,int *output)
{
int dimBlock=threadsPerBlock;
int dimGrid=(n_input+dimBlock-1)/dimBlock;
int *d_aux;
hipMalloc(&d_aux,dimGrid*sizeof(int));
hipLaunchKernelGGL(( scan_GPU_shared_LG), dim3(dimGrid),dim3(dimBlock),2*n_input*sizeof(int), 0, input,output,d_aux,n_input);
hipLaunchKernelGGL(( scan_GPU_shared_LG_add), dim3(dimGrid),dim3(dimBlock), 0, 0, input,output,d_aux,n_input);
}
__global__ void scatter_GPU(int *input,int *output,int n)
{
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(idx<n)
output[idx]=(input[idx]>0)?1:0;
}
__global__ void streamCompactGPU(int *input,int *sum, int *inbool, int *output,int n)
{
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(idx<n-1)
if(sum[idx]!=sum[idx+1])
output[sum[idx]]=input[idx];
if(idx==n-1)
if(inbool[idx])
output[sum[idx]]=input[idx];
}
void streamCompact(int *input,int *output)
{
int dimBlock=threadsPerBlock;
int dimGrid=(n_input+dimBlock-1)/dimBlock;
int *d_sum,*d_bool;
hipMalloc(&d_sum,dimGrid*sizeof(int));
hipMalloc(&d_bool,dimGrid*sizeof(int));
hipLaunchKernelGGL(( scatter_GPU), dim3(dimGrid),dim3(dimBlock), 0, 0, input,d_bool,n_input);
scan_GPU_LG(d_bool,d_sum);
hipLaunchKernelGGL(( streamCompactGPU), dim3(dimGrid),dim3(dimBlock), 0, 0, input,d_sum,d_bool,output,n_input);
}
//
//__global__ void scan_GPU_shared_op(int *input,int *output,int n)
//{
// extern __shared__ int sdata[];
// int idx=threadIdx.x;
// int offset=1;
// if(2*idx<n)
// sdata[2*idx]=input[2*idx];
// if(2*idx+1<n)
// sdata[2*idx+1]=input[2*idx+1];
// __syncthreads();
// for(int d=n>>1;d>0;d>>=1)
// {
// if(idx<d)
// {
// int ai=offset*(2*idx+1)-1;
// int bi=offset*(2*idx+2)-1;
// }
// __syncthreads();
//
//
// }
//
//
//
//}
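// A minimal sketch of the work-efficient (Blelloch) scan that the
// commented-out stub above points at; this kernel is not part of the
// original code. Assumptions: n is a power of two, the whole input fits in
// a single block, the kernel is launched with n/2 threads and n*sizeof(int)
// bytes of dynamic shared memory, and the result is an exclusive scan.
__global__ void scan_GPU_shared_workEfficient(int *input, int *output, int n)
{
	extern __shared__ int sdata[];
	int idx = threadIdx.x;
	int offset = 1;
	if(2*idx < n)
		sdata[2*idx] = input[2*idx];
	if(2*idx+1 < n)
		sdata[2*idx+1] = input[2*idx+1];
	//up-sweep (reduce) phase: build partial sums in place
	for(int d = n>>1; d > 0; d >>= 1)
	{
		__syncthreads();
		if(idx < d)
		{
			int ai = offset*(2*idx+1)-1;
			int bi = offset*(2*idx+2)-1;
			sdata[bi] += sdata[ai];
		}
		offset *= 2;
	}
	//clear the last element, then down-sweep to distribute the partial sums
	if(idx == 0)
		sdata[n-1] = 0;
	for(int d = 1; d < n; d *= 2)
	{
		offset >>= 1;
		__syncthreads();
		if(idx < d)
		{
			int ai = offset*(2*idx+1)-1;
			int bi = offset*(2*idx+2)-1;
			int t = sdata[ai];
			sdata[ai] = sdata[bi];
			sdata[bi] += t;
		}
	}
	__syncthreads();
	if(2*idx < n)
		output[2*idx] = sdata[2*idx];
	if(2*idx+1 < n)
		output[2*idx+1] = sdata[2*idx+1];
}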
int main()
{
int *a=new int[n_input];
int *aux=new int[n_input];
int *scan=new int[n_input];
int *scatter=new int[n_input];
int *d_a,*d_scan,*d_stream;
float time=0.0f;
hipMalloc(&d_a,n_input*sizeof(int));
hipMalloc(&d_scan,n_input*sizeof(int));
hipMalloc(&d_stream,n_input*sizeof(int));
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
int *answer_scan=new int[n_input];
int *answer_scatter=new int[n_input];
int num_nonzero=0;
int num_zero=0;
for(int i=0;i<n_input;i++)
{
//a[i]=i;
a[i]=rand()%5;
if(a[i]!=0)
{
answer_scatter[num_nonzero]=a[i];
num_nonzero+=1;
}
else
{
answer_scatter[n_input-1-num_zero]=0;
num_zero+=1;
}
scan[i]=0;
scatter[i]=0;
aux[i]=0;
}
hipMemcpy(d_a,a,n_input*sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(d_scan,scan,n_input*sizeof(int),hipMemcpyHostToDevice);
answer_scan[0]=0;
for(int i=1;i<n_input;i++)
answer_scan[i]=answer_scan[i-1]+a[i-1];
#if CPU_SCAN
//CPU scan
cout<<"----------------------"<<endl;
cout<<"CPU scan test"<<endl;
clock_t begin=clock();
for(int k=0;k<iters;k++)
scan_CPU(a,scan,n_input);
clock_t end=clock();
cout<<"Runtime for "<<iters<<" iters="<<end-begin<<" ms"<<endl;
//postprocess(answer_scan,scan,n_input);
#endif
#if GPU_NAIVE
//GPU naive scan
cout<<"----------------------"<<endl;
cout<<"GPU Naive scan"<<endl;
int dimBlock=threadsPerBlock;
int dimGrid=int((n_input+dimBlock-1)/dimBlock);
hipEventRecord(start, 0);
for(int i=0;i<iters;i++)
{
hipLaunchKernelGGL(( scan_GPU_naive), dim3(dimGrid),dim3(dimBlock), 0, 0, d_a,d_scan,n_input);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop );
hipEventElapsedTime( &time, start, stop);
cout << "Runtime for " << iters << " iters=" << time << " ms" << endl;
hipMemcpy(scan,d_scan,n_input*sizeof(int),hipMemcpyDeviceToHost);
//postprocess(answer_scan,scan,n_input);
#endif
#if GPU_SHARED_NAIVE
//GPU scan with shared memory
cout<<"----------------------"<<endl;
cout<<"GPU scan with shared memory"<<endl;
dimBlock=threadsPerBlock;
dimGrid=int((n_input+dimBlock-1)/dimBlock);
hipEventRecord(start, 0);
for(int i=0;i<iters;i++)
{
hipLaunchKernelGGL(( scan_GPU_shared_naive), dim3(dimGrid),dim3(dimBlock),2*n_input*sizeof(int), 0, d_a,d_scan,n_input);
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop );
time=0.0f;
hipEventElapsedTime( &time, start, stop);
cout << "Runtime for " << iters << " iters=" << time << " ms" << endl;
hipMemcpy(scan,d_scan,n_input*sizeof(int),hipMemcpyDeviceToHost);
//postprocess(answer_scan,scan,n_input);
#endif
#if GPU_SHARED_LG
cout<<"----------------------"<<endl;
cout<<"GPU scan with shared memory for large array"<<endl;
hipEventRecord(start, 0);
for(int k=0;k<iters;k++)
scan_GPU_LG(d_a,d_scan);
hipEventRecord(stop, 0);
hipEventSynchronize(stop );
time=0.0f;
hipEventElapsedTime( &time, start, stop);
cout<<"Runtime for "<<iters<<" iters="<<time<<" ms"<<endl;
hipMemcpy(scan,d_scan,n_input*sizeof(int),hipMemcpyDeviceToHost);
//postprocess(answer_scan,scan,n_input);
#endif
#if CPU_SCATTER
//CPU scan
cout<<"----------------------"<<endl;
cout<<"CPU scatter test"<<endl;
begin=clock();
for(int k=0;k<iters;k++)
scatter_CPU(a,scatter,n_input);
end=clock();
cout<<"Runtime for "<<iters<<" iters="<<end-begin<<" ms"<<endl;
//postprocess(answer_scatter,scatter,n_input);
#endif
//streamCompact(d_a,d_stream);
free(a);
free(scan);
free(answer_scan);
hipFree(d_a);
hipFree(d_scan);
getchar();
return 0;
} | 414f6d050fa5834feafcc645715ced65ee156fb6.cu | #include <stdlib.h>
#include <stdio.h>
#include <ctime>
#include <iostream>
#include <math.h>
#include <cstdlib>
#include "cuda_runtime.h"
#include "CPU_StreamCompaction.h"
#include "device_launch_parameters.h"
using namespace std;
const int n_input=1000000;
const int iters=1000;
const int threadsPerBlock=512;
#define CPU_SCAN 1
#define GPU_NAIVE 1
#define GPU_SHARED_NAIVE 0
#define GPU_SHARED_LG 1
#define CPU_SCATTER 1
__global__ void scan_GPU_naive(int *input,int *output,int n)
{
int idx=blockIdx.x*blockDim.x+threadIdx.x;
if(idx<n && idx>0)
output[idx]=input[idx-1];
__syncthreads();
for(int d=1;d<n;d*=2)
{
if(idx>=d && idx<n)
output[idx]=output[idx]+output[idx-d];
__syncthreads();
}
}
//modified from NVIDIA prefix sum slides
__global__ void scan_GPU_shared_naive(int *input, int *output, int n)
{
extern __shared__ int sdata[];
int idx=threadIdx.x;
int out=0,in=1;
if(idx<n)
{
sdata[idx]=(idx>0)?input[idx-1]:0;
sdata[n+idx]=0;
}
__syncthreads();
for(int d=1;d<n;d*=2)
{
out=1-out;
in=1-out;
__syncthreads();
if(idx>=d && idx<n)
sdata[out*n+idx]=sdata[in*n+idx]+sdata[in*n+idx-d];
else
sdata[out*n+idx]=sdata[in*n+idx];
}
output[idx]=sdata[out*n+idx];
}
__global__ void scan_GPU_shared_LG(int *input,int *output,int *aux,int n)
{
extern __shared__ int sdata[];
int idx=blockDim.x*blockIdx.x+threadIdx.x;
int out=0,in=1;
if(idx<n)
{
sdata[threadIdx.x]=input[idx];
sdata[n+threadIdx.x]=0;
}
for(int d=1;d<n;d*=2)
{
out=1-out;
in=1-out;
if(threadIdx.x>=d && idx<n)
sdata[out*n+threadIdx.x]=sdata[in*n+threadIdx.x]+sdata[in*n+threadIdx.x-d];
else
sdata[out*n+threadIdx.x]=sdata[in*n+threadIdx.x];
__syncthreads();
}
output[idx]=sdata[out*n+threadIdx.x];
if(blockIdx.x<n_input/threadsPerBlock)
aux[blockIdx.x]=sdata[out*n+blockDim.x-1];
}
__global__ void scan_GPU_shared_LG_add(int *input,int *output,int *aux,int n)
{
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(idx<n)
{
if(blockIdx.x>0)
output[idx]=output[idx]-input[idx]+aux[blockIdx.x-1];
else
output[idx]=output[idx]-input[idx];
}
}
void scan_GPU_LG(int *input,int *output)
{
int dimBlock=threadsPerBlock;
int dimGrid=(n_input+dimBlock-1)/dimBlock;
int *d_aux;
cudaMalloc(&d_aux,dimGrid*sizeof(int));
scan_GPU_shared_LG<<<dimGrid,dimBlock,2*n_input*sizeof(int)>>>(input,output,d_aux,n_input);
scan_GPU_shared_LG_add<<<dimGrid,dimBlock>>>(input,output,d_aux,n_input);
}
__global__ void scatter_GPU(int *input,int *output,int n)
{
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(idx<n)
output[idx]=(input[idx]>0)?1:0;
}
__global__ void streamCompactGPU(int *input,int *sum, int *inbool, int *output,int n)
{
int idx=blockDim.x*blockIdx.x+threadIdx.x;
if(idx<n-1)
if(sum[idx]!=sum[idx+1])
output[sum[idx]]=input[idx];
if(idx==n-1)
if(inbool[idx])
output[sum[idx]]=input[idx];
}
void streamCompact(int *input,int *output)
{
int dimBlock=threadsPerBlock;
int dimGrid=(n_input+dimBlock-1)/dimBlock;
int *d_sum,*d_bool;
cudaMalloc(&d_sum,dimGrid*sizeof(int));
cudaMalloc(&d_bool,dimGrid*sizeof(int));
scatter_GPU<<<dimGrid,dimBlock>>>(input,d_bool,n_input);
scan_GPU_LG(d_bool,d_sum);
streamCompactGPU<<<dimGrid,dimBlock>>>(input,d_sum,d_bool,output,n_input);
}
//
//__global__ void scan_GPU_shared_op(int *input,int *output,int n)
//{
// extern __shared__ int sdata[];
// int idx=threadIdx.x;
// int offset=1;
// if(2*idx<n)
// sdata[2*idx]=input[2*idx];
// if(2*idx+1<n)
// sdata[2*idx+1]=input[2*idx+1];
// __syncthreads();
// for(int d=n>>1;d>0;d>>=1)
// {
// if(idx<d)
// {
// int ai=offset*(2*idx+1)-1;
// int bi=offset*(2*idx+2)-1;
// }
// __syncthreads();
//
//
// }
//
//
//
//}
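// A minimal sketch of the work-efficient (Blelloch) scan that the
// commented-out stub above points at; this kernel is not part of the
// original code. Assumptions: n is a power of two, the whole input fits in
// a single block, the kernel is launched with n/2 threads and n*sizeof(int)
// bytes of dynamic shared memory, and the result is an exclusive scan.
__global__ void scan_GPU_shared_workEfficient(int *input, int *output, int n)
{
	extern __shared__ int sdata[];
	int idx = threadIdx.x;
	int offset = 1;
	if(2*idx < n)
		sdata[2*idx] = input[2*idx];
	if(2*idx+1 < n)
		sdata[2*idx+1] = input[2*idx+1];
	//up-sweep (reduce) phase: build partial sums in place
	for(int d = n>>1; d > 0; d >>= 1)
	{
		__syncthreads();
		if(idx < d)
		{
			int ai = offset*(2*idx+1)-1;
			int bi = offset*(2*idx+2)-1;
			sdata[bi] += sdata[ai];
		}
		offset *= 2;
	}
	//clear the last element, then down-sweep to distribute the partial sums
	if(idx == 0)
		sdata[n-1] = 0;
	for(int d = 1; d < n; d *= 2)
	{
		offset >>= 1;
		__syncthreads();
		if(idx < d)
		{
			int ai = offset*(2*idx+1)-1;
			int bi = offset*(2*idx+2)-1;
			int t = sdata[ai];
			sdata[ai] = sdata[bi];
			sdata[bi] += t;
		}
	}
	__syncthreads();
	if(2*idx < n)
		output[2*idx] = sdata[2*idx];
	if(2*idx+1 < n)
		output[2*idx+1] = sdata[2*idx+1];
}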
int main()
{
int *a=new int[n_input];
int *aux=new int[n_input];
int *scan=new int[n_input];
int *scatter=new int[n_input];
int *d_a,*d_scan,*d_stream;
float time=0.0f;
cudaMalloc(&d_a,n_input*sizeof(int));
cudaMalloc(&d_scan,n_input*sizeof(int));
cudaMalloc(&d_stream,n_input*sizeof(int));
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
int *answer_scan=new int[n_input];
int *answer_scatter=new int[n_input];
int num_nonzero=0;
int num_zero=0;
for(int i=0;i<n_input;i++)
{
//a[i]=i;
a[i]=rand()%5;
if(a[i]!=0)
{
answer_scatter[num_nonzero]=a[i];
num_nonzero+=1;
}
else
{
answer_scatter[n_input-1-num_zero]=0;
num_zero+=1;
}
scan[i]=0;
scatter[i]=0;
aux[i]=0;
}
cudaMemcpy(d_a,a,n_input*sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(d_scan,scan,n_input*sizeof(int),cudaMemcpyHostToDevice);
answer_scan[0]=0;
for(int i=1;i<n_input;i++)
answer_scan[i]=answer_scan[i-1]+a[i-1];
#if CPU_SCAN
//CPU scan
cout<<"----------------------"<<endl;
cout<<"CPU scan test"<<endl;
clock_t begin=clock();
for(int k=0;k<iters;k++)
scan_CPU(a,scan,n_input);
clock_t end=clock();
cout<<"Runtime for "<<iters<<" iters="<<end-begin<<" ms"<<endl;
//postprocess(answer_scan,scan,n_input);
#endif
#if GPU_NAIVE
//GPU naive scan
cout<<"----------------------"<<endl;
cout<<"GPU Naive scan"<<endl;
int dimBlock=threadsPerBlock;
int dimGrid=int((n_input+dimBlock-1)/dimBlock);
cudaEventRecord(start, 0);
for(int i=0;i<iters;i++)
{
scan_GPU_naive<<<dimGrid,dimBlock>>>(d_a,d_scan,n_input);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop );
cudaEventElapsedTime( &time, start, stop);
cout << "Runtime for " << iters << " iters=" << time << " ms" << endl;
cudaMemcpy(scan,d_scan,n_input*sizeof(int),cudaMemcpyDeviceToHost);
//postprocess(answer_scan,scan,n_input);
#endif
#if GPU_SHARED_NAIVE
//GPU scan with shared memory
cout<<"----------------------"<<endl;
cout<<"GPU scan with shared memory"<<endl;
dimBlock=threadsPerBlock;
dimGrid=int((n_input+dimBlock-1)/dimBlock);
cudaEventRecord(start, 0);
for(int i=0;i<iters;i++)
{
scan_GPU_shared_naive<<<dimGrid,dimBlock,2*n_input*sizeof(int)>>>(d_a,d_scan,n_input);
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop );
time=0.0f;
cudaEventElapsedTime( &time, start, stop);
cout << "Runtime for " << iters << " iters=" << time << " ms" << endl;
cudaMemcpy(scan,d_scan,n_input*sizeof(int),cudaMemcpyDeviceToHost);
//postprocess(answer_scan,scan,n_input);
#endif
#if GPU_SHARED_LG
cout<<"----------------------"<<endl;
cout<<"GPU scan with shared memory for large array"<<endl;
cudaEventRecord(start, 0);
for(int k=0;k<iters;k++)
scan_GPU_LG(d_a,d_scan);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop );
time=0.0f;
cudaEventElapsedTime( &time, start, stop);
cout<<"Runtime for "<<iters<<" iters="<<time<<" ms"<<endl;
cudaMemcpy(scan,d_scan,n_input*sizeof(int),cudaMemcpyDeviceToHost);
//postprocess(answer_scan,scan,n_input);
#endif
#if CPU_SCATTER
//CPU scan
cout<<"----------------------"<<endl;
cout<<"CPU scatter test"<<endl;
begin=clock();
for(int k=0;k<iters;k++)
scatter_CPU(a,scatter,n_input);
end=clock();
cout<<"Runtime for "<<iters<<" iters="<<end-begin<<" ms"<<endl;
//postprocess(answer_scatter,scatter,n_input);
#endif
//streamCompact(d_a,d_stream);
free(a);
free(scan);
free(answer_scan);
cudaFree(d_a);
cudaFree(d_scan);
getchar();
return 0;
} |
217e4323c158896620a8754c31845645325f8c01.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <cstdlib>
#include <limits>
using namespace std;
#define Nparticles 30
#define T_MAX 1000
#define NFC_MAX 1000000
#define W_0 0.9
#define W_T 0.4
#define MAX_V 2.0
#define c1 2.0
#define c2 2.0
#define Nvariables 30
#define Rand() ((double)rand()/RAND_MAX);
__global__ void evalPlane(int n, double *x, double *fit) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
fit[i] += x[i] * x[i];
}
class Particle {
public:
double *x;
double *v;
double fitness;
double pBest;
double *xBest;
};
class Swarm {
private:
int gBest; //index
double gBestValue;
public:
Particle *P;
Swarm();
void setGBestIndex(int index);
void setGBestValue(double gBestValue);
void setParticleXV(int i, int j, double x, double v);
void setParticleFitness(int i, double fit);
void setParticlePBest(int i, double value, double *x);
Particle getParticleValue(int i);
int getGbest();
double getGbestValue();
};
Swarm::Swarm() {
gBest = 0;
P = (Particle *)malloc(sizeof(Particle)*Nparticles);
if (P==NULL) {
fprintf(stderr, "Cannot allocate memory for %d Particles\n", Nparticles);
exit (1);
}
for(int i = 0; i < Nparticles; i++) {
P[i].x = (double *)malloc(sizeof(double)*Nvariables);
if (P[i].x==NULL) {
fprintf(stderr, "Cannot allocate memory for %d x\n", Nvariables);
exit (1);
}
P[i].v = (double *)malloc(sizeof(double)*Nvariables);
if (P[i].v==NULL) {
fprintf(stderr, "Cannot allocate memory for %d v\n", Nvariables);
exit (1);
}
P[i].xBest = (double *)malloc(sizeof(double)*Nvariables);
if (P[i].xBest==NULL) {
fprintf(stderr, "Cannot allocate memory for %d xBest\n", Nvariables);
exit (1);
}
}
}
void Swarm::setGBestIndex(int index) {
this->gBest = index;
}
void Swarm::setGBestValue(double gBestValue) {
this->gBestValue = gBestValue;
}
void Swarm::setParticleXV(int i, int j, double x, double v) {
if (x != NULL) P[i].x[j] = x;
if (v != NULL) P[i].v[j] = v;
}
void Swarm::setParticleFitness(int i, double fit) {
P[i].fitness = fit;
}
void Swarm::setParticlePBest(int i, double value, double *x) {
P[i].pBest = value;
for(int j = 0; j < Nvariables; j++)
{
P[i].xBest[j] = P[i].x[j];
}
}
Particle Swarm::getParticleValue(int i) {
return P[i];
}
int Swarm::getGbest() {
return gBest;
}
double Swarm::getGbestValue() {
return gBestValue;
}
class PSO {
private:
int nfc;
double w;
double *maxV;
public:
Swarm swarm;
PSO();
void initialize();
void evolution();
void updateBest(int i);
void calculateVMax();
void particleMovement();
void evaluate(int i);
void evaluateSwarm();
};
PSO::PSO() {
maxV = (double *)malloc(sizeof(double)*Nvariables);
if (maxV==NULL) {
fprintf(stderr, "Cannot allocate memory for %d maxV\n", Nvariables);
exit (1);
}
}
void PSO::evolution() {
double dw = (W_0 - W_T) / (NFC_MAX / Nparticles);
w = W_0;
initialize();
nfc = 0;
while(nfc < NFC_MAX) {
calculateVMax();
particleMovement();
evaluateSwarm();
w -= dw;
}
}
void PSO::initialize() {
swarm.setGBestValue(numeric_limits<double>::max());
for(int i = 0; i < Nparticles; i++) {
for(int j = 0; j < Nvariables; j++) {
double x = -5.12 + 10.24 * Rand();
//double x = Rand();
double v = 0.0;
swarm.setParticleXV(i, j, x, v);
}
evaluate(i);
updateBest(i);
double fitness = swarm.getParticleValue(i).fitness;
double gbest = swarm.getGbestValue();
if (fitness < gbest) {
swarm.setGBestValue(fitness);
swarm.setGBestIndex(i);
}
}
Particle best = swarm.getParticleValue(swarm.getGbest());
printf("0 : ");
for (int j = 0; j < Nvariables; j++)
printf("%g ", best.xBest[j]);
printf(" = %e\n", best.pBest);
}
void PSO::evaluate(int i) {
//int index = i;
int ThreadsInBlock = 8;
int BlocksInGrid = (int)(Nvariables + ThreadsInBlock - 1) / ThreadsInBlock;
double *fitness = (double *)malloc(sizeof(double));
hipMallocManaged((void **)&fitness, sizeof(double));
hipMallocManaged((void **)&swarm.P[i].x, sizeof(double)*Nvariables);
hipLaunchKernelGGL(( evalPlane), dim3(BlocksInGrid), dim3(ThreadsInBlock), 0, 0, Nvariables, swarm.P[i].x, fitness);
hipGetLastError();
//if (hipSuccess != hipMemcpy())
hipDeviceSynchronize();
// double fitness = 0.0;
// for(int k = 0 ; k < Nvariables ; k++ ) {
// int x = swarm.getParticleValue(index).x[k];
// fitness += x * x;
// }
// double fitness = 0, temp = 0;
// for(int k = 0 ; k < Nvariables ; k++ ) {
// for(int l = 0; l < k; l++)
// {
// double x = swarm.getParticleValue(index).x[l];
// temp += x;
// }
// fitness += temp * temp;
// }
hipFree(fitness);
hipFree(swarm.P[i].x);
//swarm.setParticleFitness(index, fitness);
}
void PSO::evaluateSwarm() {
for(int i = 0; i < Nparticles; i++) {
evaluate(i);
nfc++;
if (nfc % 5000 == 0) {
Particle best = swarm.getParticleValue(swarm.getGbest());
printf("%d : ", nfc);
// for (int j = 0; j < Nvariables; j++)
// printf("%g ", best.xBest[j]);
printf(" = %g\n", best.pBest);
//cout << "PSO SPHERE nfc" << nfc << " \tbestfit " << best.pBest << "\n";
}
}
for(int n = 0; n < Nparticles; n++) {
Particle par = swarm.getParticleValue(n);
if (par.fitness < par.pBest ) {
swarm.setParticlePBest(n, par.fitness, par.x);
if (par.fitness < swarm.getGbestValue() ) {
swarm.setGBestIndex(n);
swarm.setGBestValue(par.fitness);
}
}
}
}
void PSO::updateBest(int i) {
Particle par = swarm.getParticleValue(i);
swarm.setParticlePBest(i, par.fitness, par.x);
}
void PSO::calculateVMax() {
double xmin[Nparticles], xmax[Nparticles];
for (int d = 0; d < Nvariables; d++) {
xmin[d] = xmax[d] = swarm.getParticleValue(0).x[d];
for (int n = 1; n < Nparticles; n++) {
double pos = swarm.getParticleValue(n).x[d];
if (pos < xmin[d])
xmin[d] = pos;
if (pos > xmax[d])
xmax[d] = pos;
}
maxV[d] = xmax[d] - xmin[d];
}
}
void PSO::particleMovement() {
int n, d;
for (n = 0; n < Nparticles ; n++) {
Particle par = swarm.getParticleValue(n);
Particle bPar = swarm.getParticleValue(swarm.getGbest());
// update velocities
for(d = 0; d < Nvariables ; d++ ) {
double r1 = Rand();
double r2 = Rand();
double newV = w * par.v[d] + c1 * r1 * (par.xBest[d] - par.x[d]) + c2 * r2 * (bPar.x[d] - par.x[d]);
swarm.setParticleXV(n, d, NULL, newV);
// check v with its dimensional maxV
if ( swarm.getParticleValue(n).v[d] > maxV[d] ) swarm.setParticleXV(n, d, NULL, maxV[d]);
else if ( swarm.getParticleValue(n).v[d] < -maxV[d] ) swarm.setParticleXV(n, d, NULL, -maxV[d]);
}
// update positions
Particle newPar = swarm.getParticleValue(n);
for (d = 0; d < Nvariables ; d++) {
//newPar.x[d] = newPar.x[d] + newPar.v[d];
swarm.setParticleXV(n, d, newPar.x[d] + newPar.v[d], NULL);
}
}
}
int main(int argc, char **argv) {
//int threadCount = BlocksInGrid * ThreadsInBlock;
//long size = threadCount * sizeof(double);
PSO pso;
pso.evolution();
return 0;
} | 217e4323c158896620a8754c31845645325f8c01.cu | #include <iostream>
#include <cstdlib>
#include <limits>
using namespace std;
#define Nparticles 30
#define T_MAX 1000
#define NFC_MAX 1000000
#define W_0 0.9
#define W_T 0.4
#define MAX_V 2.0
#define c1 2.0
#define c2 2.0
#define Nvariables 30
#define Rand() ((double)rand()/RAND_MAX);
__global__ void evalPlane(int n, double *x, double *fit) {
int index = blockIdx.x * blockDim.x + threadIdx.x;
int stride = blockDim.x * gridDim.x;
for (int i = index; i < n; i += stride)
fit[i] += x[i] * x[i];
}
class Particle {
public:
double *x;
double *v;
double fitness;
double pBest;
double *xBest;
};
class Swarm {
private:
int gBest; //index
double gBestValue;
public:
Particle *P;
Swarm();
void setGBestIndex(int index);
void setGBestValue(double gBestValue);
void setParticleXV(int i, int j, double x, double v);
void setParticleFitness(int i, double fit);
void setParticlePBest(int i, double value, double *x);
Particle getParticleValue(int i);
int getGbest();
double getGbestValue();
};
Swarm::Swarm() {
gBest = 0;
P = (Particle *)malloc(sizeof(Particle)*Nparticles);
if (P==NULL) {
fprintf(stderr, "Cannot allocate memory for %d Particles\n", Nparticles);
exit (1);
}
for(int i = 0; i < Nparticles; i++) {
P[i].x = (double *)malloc(sizeof(double)*Nvariables);
if (P[i].x==NULL) {
fprintf(stderr, "Cannot allocate memory for %d x\n", Nvariables);
exit (1);
}
P[i].v = (double *)malloc(sizeof(double)*Nvariables);
if (P[i].v==NULL) {
fprintf(stderr, "Cannot allocate memory for %d v\n", Nvariables);
exit (1);
}
P[i].xBest = (double *)malloc(sizeof(double)*Nvariables);
if (P[i].xBest==NULL) {
fprintf(stderr, "Cannot allocate memory for %d xBest\n", Nvariables);
exit (1);
}
}
}
void Swarm::setGBestIndex(int index) {
this->gBest = index;
}
void Swarm::setGBestValue(double gBestValue) {
this->gBestValue = gBestValue;
}
void Swarm::setParticleXV(int i, int j, double x, double v) {
if (x != NULL) P[i].x[j] = x;
if (v != NULL) P[i].v[j] = v;
}
void Swarm::setParticleFitness(int i, double fit) {
P[i].fitness = fit;
}
void Swarm::setParticlePBest(int i, double value, double *x) {
P[i].pBest = value;
for(int j = 0; j < Nvariables; j++)
{
P[i].xBest[j] = P[i].x[j];
}
}
Particle Swarm::getParticleValue(int i) {
return P[i];
}
int Swarm::getGbest() {
return gBest;
}
double Swarm::getGbestValue() {
return gBestValue;
}
class PSO {
private:
int nfc;
double w;
double *maxV;
public:
Swarm swarm;
PSO();
void initialize();
void evolution();
void updateBest(int i);
void calculateVMax();
void particleMovement();
void evaluate(int i);
void evaluateSwarm();
};
PSO::PSO() {
maxV = (double *)malloc(sizeof(double)*Nvariables);
if (maxV==NULL) {
fprintf(stderr, "Cannot allocate memory for %d maxV\n", Nvariables);
exit (1);
}
}
void PSO::evolution() {
double dw = (W_0 - W_T) / (NFC_MAX / Nparticles);
w = W_0;
initialize();
nfc = 0;
while(nfc < NFC_MAX) {
calculateVMax();
particleMovement();
evaluateSwarm();
w -= dw;
}
}
void PSO::initialize() {
swarm.setGBestValue(numeric_limits<double>::max());
for(int i = 0; i < Nparticles; i++) {
for(int j = 0; j < Nvariables; j++) {
double x = -5.12 + 10.24 * Rand();
//double x = Rand();
double v = 0.0;
swarm.setParticleXV(i, j, x, v);
}
evaluate(i);
updateBest(i);
double fitness = swarm.getParticleValue(i).fitness;
double gbest = swarm.getGbestValue();
if (fitness < gbest) {
swarm.setGBestValue(fitness);
swarm.setGBestIndex(i);
}
}
Particle best = swarm.getParticleValue(swarm.getGbest());
printf("0 : ");
for (int j = 0; j < Nvariables; j++)
printf("%g ", best.xBest[j]);
printf(" = %e\n", best.pBest);
}
void PSO::evaluate(int i) {
//int index = i;
int ThreadsInBlock = 8;
int BlocksInGrid = (int)(Nvariables + ThreadsInBlock - 1) / ThreadsInBlock;
double *fitness = (double *)malloc(sizeof(double));
cudaMallocManaged((void **)&fitness, sizeof(double));
cudaMallocManaged((void **)&swarm.P[i].x, sizeof(double)*Nvariables);
evalPlane<<<BlocksInGrid, ThreadsInBlock>>>(Nvariables, swarm.P[i].x, fitness);
cudaGetLastError();
//if (cudaSuccess != cudaMemcpy())
cudaDeviceSynchronize();
// double fitness = 0.0;
// for(int k = 0 ; k < Nvariables ; k++ ) {
// int x = swarm.getParticleValue(index).x[k];
// fitness += x * x;
// }
// double fitness = 0, temp = 0;
// for(int k = 0 ; k < Nvariables ; k++ ) {
// for(int l = 0; l < k; l++)
// {
// double x = swarm.getParticleValue(index).x[l];
// temp += x;
// }
// fitness += temp * temp;
// }
cudaFree(fitness);
cudaFree(swarm.P[i].x);
//swarm.setParticleFitness(index, fitness);
}
void PSO::evaluateSwarm() {
for(int i = 0; i < Nparticles; i++) {
evaluate(i);
nfc++;
if (nfc % 5000 == 0) {
Particle best = swarm.getParticleValue(swarm.getGbest());
printf("%d : ", nfc);
// for (int j = 0; j < Nvariables; j++)
// printf("%g ", best.xBest[j]);
printf(" = %g\n", best.pBest);
//cout << "PSO SPHERE nfc" << nfc << " \tbestfit " << best.pBest << "\n";
}
}
for(int n = 0; n < Nparticles; n++) {
Particle par = swarm.getParticleValue(n);
if (par.fitness < par.pBest ) {
swarm.setParticlePBest(n, par.fitness, par.x);
if (par.fitness < swarm.getGbestValue() ) {
swarm.setGBestIndex(n);
swarm.setGBestValue(par.fitness);
}
}
}
}
void PSO::updateBest(int i) {
Particle par = swarm.getParticleValue(i);
swarm.setParticlePBest(i, par.fitness, par.x);
}
void PSO::calculateVMax() {
double xmin[Nparticles], xmax[Nparticles];
for (int d = 0; d < Nvariables; d++) {
xmin[d] = xmax[d] = swarm.getParticleValue(0).x[d];
for (int n = 1; n < Nparticles; n++) {
double pos = swarm.getParticleValue(n).x[d];
if (pos < xmin[d])
xmin[d] = pos;
if (pos > xmax[d])
xmax[d] = pos;
}
maxV[d] = xmax[d] - xmin[d];
}
}
void PSO::particleMovement() {
int n, d;
for (n = 0; n < Nparticles ; n++) {
Particle par = swarm.getParticleValue(n);
Particle bPar = swarm.getParticleValue(swarm.getGbest());
// update velocities
for(d = 0; d < Nvariables ; d++ ) {
double r1 = Rand();
double r2 = Rand();
double newV = w * par.v[d] + c1 * r1 * (par.xBest[d] - par.x[d]) + c2 * r2 * (bPar.x[d] - par.x[d]);
swarm.setParticleXV(n, d, NULL, newV);
// check v with its dimensional maxV
if ( swarm.getParticleValue(n).v[d] > maxV[d] ) swarm.setParticleXV(n, d, NULL, maxV[d]);
else if ( swarm.getParticleValue(n).v[d] < -maxV[d] ) swarm.setParticleXV(n, d, NULL, -maxV[d]);
}
// update positions
Particle newPar = swarm.getParticleValue(n);
for (d = 0; d < Nvariables ; d++) {
//newPar.x[d] = newPar.x[d] + newPar.v[d];
swarm.setParticleXV(n, d, newPar.x[d] + newPar.v[d], NULL);
}
}
}
int main(int argc, char **argv) {
//int threadCount = BlocksInGrid * ThreadsInBlock;
//long size = threadCount * sizeof(double);
PSO pso;
pso.evolution();
return 0;
} |
4046ed0e4730d5cfd41065207ef33717b49e2adc.hip | // !!! This is a file automatically generated by hipify!!!
#if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include <hip/hip_runtime.h>
#include "backproject.hcu"
#include <iostream>
using namespace std;
// from
// https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__constant__ float geom_[MAX_PROJ_STACK_SIZE * 12];
__constant__ int3 proj_shape_;
__constant__ int3 vol_shape_;
__constant__ float3 vol_orig_;
__constant__ float3 voxel_size_;
static int3 proj_shape_host_;
static int3 vol_shape_host_;
static float2 pixel_dim_host_;
__constant__ float displacement_;
__constant__ float2 pixel_shape_;
texture<float, hipTextureType2DLayered> projTex_;
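// map() below applies the n-th projection matrix stored in geom_ (12 floats
// per view, read as a 3x4 row-major matrix) to the homogeneous voxel
// position (x, y, z, 1), producing a homogeneous detector coordinate; the
// backprojection kernel divides by the third component before sampling.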
inline __device__
float3 map( float3&& vp, int n )
{
const float* matrix = &(geom_[n*12]);
float3 mat = make_float3(
(matrix[0] * vp.x + matrix[1] * vp.y + matrix[2] * vp.z + matrix[3]),
(matrix[4] * vp.x + matrix[5] * vp.y + matrix[6] * vp.z + matrix[7]),
(matrix[8] * vp.x + matrix[9] * vp.y + matrix[10] * vp.z + matrix[11])
);
return mat;
}
__global__
void kernel_backproject( float* vol )
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
const int k = blockIdx.z*blockDim.z + threadIdx.z;
if( i >= vol_shape_.x || j >= vol_shape_.y || k >= vol_shape_.z )
return;
const float x = i*voxel_size_.x + vol_orig_.x;
const float y = j*voxel_size_.y + vol_orig_.y;
const float z = k*voxel_size_.z + vol_orig_.z;
float val = 0.0f;
for( int n = 0; n < proj_shape_.z; ++n )
{
auto ip = map( make_float3( x, y, z ), n );
ip.z = 1.0f / ip.z;
ip.x *= ip.z;
ip.y *= ip.z;
ip.x = (ip.x)/pixel_shape_.x + displacement_;
ip.y = (ip.y)/pixel_shape_.y;
val += tex2DLayered( projTex_, ip.x + 0.5, ip.y + 0.5, n );// * ip.z * ip.z;
}
// linear volume address
const unsigned int l = vol_shape_.x * ( k*vol_shape_.y + j ) + i;
vol[l] = val;
}
__global__
void kernel_project( const float* vol, float* proj )
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
const int k = blockIdx.z*blockDim.z + threadIdx.z;
if( i >= vol_shape_.x || j >= vol_shape_.y || k >= vol_shape_.z )
return;
const float x = i*voxel_size_.x + vol_orig_.x;
const float y = j*voxel_size_.y + vol_orig_.y;
const float z = k*voxel_size_.z + vol_orig_.z;
const float v = vol[vol_shape_.x * ( k*vol_shape_.y + j ) + i];
for( int n = 0; n < proj_shape_.z; ++n )
{
float3 ip = map( make_float3( x, y, z ), n );
ip.x = (ip.x / pixel_shape_.x) + displacement_;
ip.y = (ip.y / pixel_shape_.y);
const float vz = v;
// four neighbours on projection
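// scatter (splat) the voxel value into the four surrounding detector pixels with bilinear
// weights; atomicAdd is required because many voxels contribute to the same pixel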
const int u1 = ((int)ip.x),
v1 = ((int)ip.y);
const int u2 = u1+1,
v2 = v1+1;
// simulate hipAddressModeBorder
if( u1 >= -1 && v1 >= -1 && u2 <= proj_shape_.x && v2 <= proj_shape_.y )
{
const float wu2 = ip.x - ((float)u1);
const float wu1 = 1.0f - wu2;
const float wv2 = ip.y - ((float)v1);
const float wv1 = 1.0f - wv2;
const unsigned int l1 = proj_shape_.x * ( n*proj_shape_.y + v1 ) + u1;
const unsigned int l2 = l1 + proj_shape_.x;
if( u1 >= 0 )
{
const float vzwu1 = vz*wu1;
if( v1 >= 0 )
atomicAdd( &proj[l1], vzwu1*wv1 );
if( v2 < proj_shape_.y )
atomicAdd( &proj[l2], vzwu1*wv2 );
}
if( u2 < proj_shape_.x )
{
const float vzwu2 = vz*wu2;
if( v1 >= 0 )
atomicAdd( &proj[l1 + 1], vzwu2*wv1 );
if( v2 < proj_shape_.y )
atomicAdd( &proj[ l2 + 1], vzwu2*wv2 );
}
}
}
}
__host__
void cuda_init_backproject( float* geom,
int U, int V, int N,
int X, int Y, int Z,
float ox, float oy, float oz,
float sx, float sy, float sz,
float displacement,
float px, float py )
{
proj_shape_host_ = make_int3( U, V, N );
vol_shape_host_ = make_int3( X, Y, Z );
auto vol_orig = make_float3( ox, oy, oz );
auto voxel_size = make_float3( sx, sy, sz );
pixel_dim_host_ = make_float2( px, py );
gpuErrchk( hipMemcpyToSymbol( geom_, geom, 12 * sizeof(float) * N ) );
gpuErrchk( hipMemcpyToSymbol( proj_shape_, &proj_shape_host_, sizeof(int3) ) );
gpuErrchk( hipMemcpyToSymbol( vol_shape_, &vol_shape_host_, sizeof(int3) ) );
gpuErrchk( hipMemcpyToSymbol( vol_orig_, &vol_orig, sizeof(float3) ) );
gpuErrchk( hipMemcpyToSymbol( voxel_size_, &voxel_size, sizeof(float3) ) );
gpuErrchk( hipMemcpyToSymbol( displacement_, &displacement, sizeof(float) ) );
gpuErrchk( hipMemcpyToSymbol( pixel_shape_, &pixel_dim_host_, sizeof(float2) ) );
}
__host__
void cuda_backproject( const float* proj, float* vol )
{
// set texture properties
projTex_.addressMode[0] = hipAddressModeBorder;
projTex_.addressMode[1] = hipAddressModeBorder;
projTex_.addressMode[2] = hipAddressModeBorder;
projTex_.filterMode = hipFilterModeLinear;
projTex_.normalized = false;
// malloc cuda array for texture
hipExtent projExtent = make_hipExtent( proj_shape_host_.x,
proj_shape_host_.y,
proj_shape_host_.z );
hipArray *projArray;
static hipChannelFormatDesc channelDesc = hipCreateChannelDesc<float>();
gpuErrchk( hipMalloc3DArray( &projArray, &channelDesc, projExtent, hipArrayLayered ) );
// copy data to 3D array
hipMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_hipPitchedPtr( const_cast<float*>( proj ),
proj_shape_host_.x*sizeof(float),
proj_shape_host_.x,
proj_shape_host_.y
);
copyParams.dstArray = projArray;
copyParams.extent = projExtent;
copyParams.kind = hipMemcpyDeviceToDevice;
gpuErrchk( hipMemcpy3D( &copyParams ) );
// bind texture reference
gpuErrchk( hipBindTextureToArray( projTex_, (hipArray*)projArray,
channelDesc ) );
// launch kernel
const unsigned int gridsize_x = (vol_shape_host_.x-1) / BLOCKSIZE_X + 1;
const unsigned int gridsize_y = (vol_shape_host_.y-1) / BLOCKSIZE_Y + 1;
const unsigned int gridsize_z = (vol_shape_host_.z-1) / BLOCKSIZE_Z + 1;
const dim3 grid = dim3( gridsize_x, gridsize_y, gridsize_z );
const dim3 block = dim3( BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z );
// fprintf(stderr, "\n\n\n\n");
// fprintf(stderr, "Iniciano o backproject");
// fprintf(stderr, "Saiu do backproject");
// fprintf(stderr, "\n\n\n\n");
hipLaunchKernelGGL(( kernel_backproject), dim3(grid), dim3(block) , 0, 0, vol );
// check for errors
gpuErrchk( hipPeekAtLastError() );
gpuErrchk( hipDeviceSynchronize() );
// cleanup
gpuErrchk( hipUnbindTexture( projTex_ ) );
gpuErrchk( hipFreeArray( projArray ) );
}
__host__
void cuda_project( const float* vol, float* proj )
{
// set proj to zero
hipMemset( proj, 0, proj_shape_host_.x*proj_shape_host_.y*proj_shape_host_.z
* sizeof( float ) );
// launch kernel
const unsigned int gridsize_x = (vol_shape_host_.x-1) / BLOCKSIZE_X + 1;
const unsigned int gridsize_y = (vol_shape_host_.y-1) / BLOCKSIZE_Y + 1;
const unsigned int gridsize_z = (vol_shape_host_.z-1) / BLOCKSIZE_Z + 1;
const dim3 grid = dim3( gridsize_x, gridsize_y, gridsize_z );
const dim3 block = dim3( BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z );
hipLaunchKernelGGL(( kernel_project), dim3(grid), dim3(block) , 0, 0, vol, proj );
}
#endif
| 4046ed0e4730d5cfd41065207ef33717b49e2adc.cu | #if GOOGLE_CUDA
#define EIGEN_USE_GPU
#include "third_party/eigen3/unsupported/Eigen/CXX11/Tensor"
#include <cuda.h>
#include "backproject.hcu"
#include <iostream>
using namespace std;
// from
// https://stackoverflow.com/questions/14038589/what-is-the-canonical-way-to-check-for-errors-using-the-cuda-runtime-api
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
__constant__ float geom_[MAX_PROJ_STACK_SIZE * 12];
__constant__ int3 proj_shape_;
__constant__ int3 vol_shape_;
__constant__ float3 vol_orig_;
__constant__ float3 voxel_size_;
static int3 proj_shape_host_;
static int3 vol_shape_host_;
static float2 pixel_dim_host_;
__constant__ float displacement_;
__constant__ float2 pixel_shape_;
texture<float, cudaTextureType2DLayered> projTex_;
inline __device__
float3 map( float3&& vp, int n )
{
const float* matrix = &(geom_[n*12]);
float3 mat = make_float3(
(matrix[0] * vp.x + matrix[1] * vp.y + matrix[2] * vp.z + matrix[3]),
(matrix[4] * vp.x + matrix[5] * vp.y + matrix[6] * vp.z + matrix[7]),
(matrix[8] * vp.x + matrix[9] * vp.y + matrix[10] * vp.z + matrix[11])
);
return mat;
}
__global__
void kernel_backproject( float* vol )
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
const int k = blockIdx.z*blockDim.z + threadIdx.z;
if( i >= vol_shape_.x || j >= vol_shape_.y || k >= vol_shape_.z )
return;
const float x = i*voxel_size_.x + vol_orig_.x;
const float y = j*voxel_size_.y + vol_orig_.y;
const float z = k*voxel_size_.z + vol_orig_.z;
float val = 0.0f;
for( int n = 0; n < proj_shape_.z; ++n )
{
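// project the voxel centre with the n-th matrix, apply the perspective divide (multiply by 1/z),
// then convert from detector units to pixel coordinates (including the x displacement) before
// sampling the layered projection texture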
auto ip = map( make_float3( x, y, z ), n );
ip.z = 1.0f / ip.z;
ip.x *= ip.z;
ip.y *= ip.z;
ip.x = (ip.x)/pixel_shape_.x + displacement_;
ip.y = (ip.y)/pixel_shape_.y;
val += tex2DLayered( projTex_, ip.x + 0.5, ip.y + 0.5, n );// * ip.z * ip.z;
}
// linear volume address
const unsigned int l = vol_shape_.x * ( k*vol_shape_.y + j ) + i;
vol[l] = val;
}
__global__
void kernel_project( const float* vol, float* proj )
{
const int i = blockIdx.x*blockDim.x + threadIdx.x;
const int j = blockIdx.y*blockDim.y + threadIdx.y;
const int k = blockIdx.z*blockDim.z + threadIdx.z;
if( i >= vol_shape_.x || j >= vol_shape_.y || k >= vol_shape_.z )
return;
const float x = i*voxel_size_.x + vol_orig_.x;
const float y = j*voxel_size_.y + vol_orig_.y;
const float z = k*voxel_size_.z + vol_orig_.z;
const float v = vol[vol_shape_.x * ( k*vol_shape_.y + j ) + i];
for( int n = 0; n < proj_shape_.z; ++n )
{
float3 ip = map( make_float3( x, y, z ), n );
ip.x = (ip.x / pixel_shape_.x) + displacement_;
ip.y = (ip.y / pixel_shape_.y);
const float vz = v;
// four neighbours on projection
const int u1 = ((int)ip.x),
v1 = ((int)ip.y);
const int u2 = u1+1,
v2 = v1+1;
// simulate cudaAddressModeBorder
if( u1 >= -1 && v1 >= -1 && u2 <= proj_shape_.x && v2 <= proj_shape_.y )
{
const float wu2 = ip.x - ((float)u1);
const float wu1 = 1.0f - wu2;
const float wv2 = ip.y - ((float)v1);
const float wv1 = 1.0f - wv2;
const unsigned int l1 = proj_shape_.x * ( n*proj_shape_.y + v1 ) + u1;
const unsigned int l2 = l1 + proj_shape_.x;
if( u1 >= 0 )
{
const float vzwu1 = vz*wu1;
if( v1 >= 0 )
atomicAdd( &proj[l1], vzwu1*wv1 );
if( v2 < proj_shape_.y )
atomicAdd( &proj[l2], vzwu1*wv2 );
}
if( u2 < proj_shape_.x )
{
const float vzwu2 = vz*wu2;
if( v1 >= 0 )
atomicAdd( &proj[l1 + 1], vzwu2*wv1 );
if( v2 < proj_shape_.y )
atomicAdd( &proj[ l2 + 1], vzwu2*wv2 );
}
}
}
}
__host__
void cuda_init_backproject( float* geom,
int U, int V, int N,
int X, int Y, int Z,
float ox, float oy, float oz,
float sx, float sy, float sz,
float displacement,
float px, float py )
{
proj_shape_host_ = make_int3( U, V, N );
vol_shape_host_ = make_int3( X, Y, Z );
auto vol_orig = make_float3( ox, oy, oz );
auto voxel_size = make_float3( sx, sy, sz );
pixel_dim_host_ = make_float2( px, py );
gpuErrchk( cudaMemcpyToSymbol( geom_, geom, 12 * sizeof(float) * N ) );
gpuErrchk( cudaMemcpyToSymbol( proj_shape_, &proj_shape_host_, sizeof(int3) ) );
gpuErrchk( cudaMemcpyToSymbol( vol_shape_, &vol_shape_host_, sizeof(int3) ) );
gpuErrchk( cudaMemcpyToSymbol( vol_orig_, &vol_orig, sizeof(float3) ) );
gpuErrchk( cudaMemcpyToSymbol( voxel_size_, &voxel_size, sizeof(float3) ) );
gpuErrchk( cudaMemcpyToSymbol( displacement_, &displacement, sizeof(float) ) );
gpuErrchk( cudaMemcpyToSymbol( pixel_shape_, &pixel_dim_host_, sizeof(float2) ) );
}
__host__
void cuda_backproject( const float* proj, float* vol )
{
// set texture properties
projTex_.addressMode[0] = cudaAddressModeBorder;
projTex_.addressMode[1] = cudaAddressModeBorder;
projTex_.addressMode[2] = cudaAddressModeBorder;
projTex_.filterMode = cudaFilterModeLinear;
projTex_.normalized = false;
// malloc cuda array for texture
cudaExtent projExtent = make_cudaExtent( proj_shape_host_.x,
proj_shape_host_.y,
proj_shape_host_.z );
cudaArray *projArray;
static cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<float>();
gpuErrchk( cudaMalloc3DArray( &projArray, &channelDesc, projExtent, cudaArrayLayered ) );
// copy data to 3D array
cudaMemcpy3DParms copyParams = {0};
copyParams.srcPtr = make_cudaPitchedPtr( const_cast<float*>( proj ),
proj_shape_host_.x*sizeof(float),
proj_shape_host_.x,
proj_shape_host_.y
);
copyParams.dstArray = projArray;
copyParams.extent = projExtent;
copyParams.kind = cudaMemcpyDeviceToDevice;
gpuErrchk( cudaMemcpy3D( &copyParams ) );
// bind texture reference
gpuErrchk( cudaBindTextureToArray( projTex_, (cudaArray*)projArray,
channelDesc ) );
// launch kernel
const unsigned int gridsize_x = (vol_shape_host_.x-1) / BLOCKSIZE_X + 1;
const unsigned int gridsize_y = (vol_shape_host_.y-1) / BLOCKSIZE_Y + 1;
const unsigned int gridsize_z = (vol_shape_host_.z-1) / BLOCKSIZE_Z + 1;
const dim3 grid = dim3( gridsize_x, gridsize_y, gridsize_z );
const dim3 block = dim3( BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z );
// fprintf(stderr, "\n\n\n\n");
// fprintf(stderr, "Iniciano o backproject");
// fprintf(stderr, "Saiu do backproject");
// fprintf(stderr, "\n\n\n\n");
kernel_backproject<<< grid, block >>>( vol );
// check for errors
gpuErrchk( cudaPeekAtLastError() );
gpuErrchk( cudaDeviceSynchronize() );
// cleanup
gpuErrchk( cudaUnbindTexture( projTex_ ) );
gpuErrchk( cudaFreeArray( projArray ) );
}
__host__
void cuda_project( const float* vol, float* proj )
{
// set proj to zero
cudaMemset( proj, 0, proj_shape_host_.x*proj_shape_host_.y*proj_shape_host_.z
* sizeof( float ) );
// launch kernel
const unsigned int gridsize_x = (vol_shape_host_.x-1) / BLOCKSIZE_X + 1;
const unsigned int gridsize_y = (vol_shape_host_.y-1) / BLOCKSIZE_Y + 1;
const unsigned int gridsize_z = (vol_shape_host_.z-1) / BLOCKSIZE_Z + 1;
const dim3 grid = dim3( gridsize_x, gridsize_y, gridsize_z );
const dim3 block = dim3( BLOCKSIZE_X, BLOCKSIZE_Y, BLOCKSIZE_Z );
kernel_project<<< grid, block >>>( vol, proj );
}
#endif
|
7e618ecf5ce182d0c138641f3d423d8587747558.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <algorithm>
#define FILTER_WIDTH 3
__constant__ int dc_xFilter[FILTER_WIDTH * FILTER_WIDTH];
__constant__ int dc_yFilter[FILTER_WIDTH * FILTER_WIDTH];
#define CHECK(call){\
const hipError_t error = call;\
if (error != hipSuccess){\
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);\
fprintf(stderr, "code: %d, reason: %s\n", error, hipGetErrorString(error));\
exit(EXIT_FAILURE);\
}\
}
struct GpuTimer{
hipEvent_t start;
hipEvent_t stop;
GpuTimer(){
hipEventCreate(&start);
hipEventCreate(&stop);
}
~GpuTimer(){
hipEventDestroy(start);
hipEventDestroy(stop);
}
void Start(){
hipEventRecord(start, 0);
hipEventSynchronize(start);
}
void Stop(){
hipEventRecord(stop, 0);
}
float Eplapsed(){
float eplapsed;
hipEventSynchronize(stop);
hipEventElapsedTime(&eplapsed, start, stop);
return eplapsed;
}
};
void readRGBPnm (char *fileName, int &width, int &height, uchar3 *&pixels){
FILE *f = fopen(fileName, "r");
if (f == NULL){
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
char type[3];
fscanf(f, "%s", type);
// Check the type of input img
if (strcmp(type, "P3") != 0){
fclose(f);
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
fscanf(f, "%i", &width);
fscanf(f, "%i", &height);
int maxVal;
fscanf(f, "%i", &maxVal);
// Assume 1 byte per value
if (maxVal > 255){
fclose(f);
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
pixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
for (int i = 0; i< width * height; i++){
fscanf(f, "%hhu%hhu%hhu", &pixels[i].x, &pixels[i].y, &pixels[i].z);
}
fclose(f);
}
void writeRGBPnm (const uchar3 *pixels, int width, int height, char *fileName){
FILE *f = fopen(fileName, "w");
if (f == NULL){
printf("Cannot write %s\n", fileName);
exit(EXIT_FAILURE);
}
fprintf(f, "P3\n%i\n%i\n255\n", width, height);
for (int i = 0; i < width * height; i++){
fprintf(f, "%hhu\n%hhu\n%hhu\n", pixels[i].x, pixels[i].y, pixels[i].z);
}
fclose(f);
}
void writeGrayScalePnm (int *pixels, int width, int height, char *fileName){
FILE *f = fopen(fileName, "w");
if (f == NULL){
printf("Cannot write %s\n", fileName);
exit(EXIT_FAILURE);
}
fprintf(f, "P2\n%i\n%i\n255\n", width, height);
for (int i = 0; i < width * height; i++){
fprintf(f, "%hhu\n", pixels[i]);
}
fclose(f);
}
void writeMatrixTxt (int *pixels, int width, int height, char *fileName){
FILE *f = fopen(fileName, "w");
if (f == NULL){
printf("Cannot write %s\n", fileName);
exit(EXIT_FAILURE);
}
for (int i = 0; i < height; i++){
for (int j = 0; j < width; j++){
fprintf(f, "%d ", pixels[i * width + j]);
}
fprintf(f, "\n");
}
fclose(f);
}
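// builds the 3x3 Sobel kernels: columns (horizontal) or rows (vertical) of {1, 0, -1},
// with the centre row/column weighted by 2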
void initSobelFilter(int *filter, bool horizontal){
int filterWidth = FILTER_WIDTH;
int val = 0;
int margin = filterWidth / 2;
for (int filterR = 0; filterR < filterWidth; filterR++){
for (int filterC = 0; filterC < filterWidth; filterC++){
if (horizontal == true){
if (filterC < margin){
val = 1;
}
else if (filterC == margin){
val = 0;
}
else{
val = -1;
}
if (filterR == margin){
val *= 2;
}
}
else{
if (filterR < margin){
val = 1;
}
else if (filterR == margin){
val = 0;
}
else{
val = -1;
}
if (filterC == margin){
val *= 2;
}
}
filter[filterR * filterWidth + filterC] = val;
}
}
}
void convertRgb2Gray (const uchar3 *in, int n, int *out){
for (int i = 0; i < n; i++){
out[i] = 0.299f * in[i].x + 0.587f * in[i].y + 0.114f * in[i].z;
}
}
void getPixelsImportance (int *in, int width, int height, int *xFilter, int *yFilter, int filterWidth, int *out){
int margin = filterWidth / 2;
for (int col = 0; col < width; col++){
for (int row = 0; row < height; row++){
int curIdx = row * width + col;
float xSum = 0, ySum = 0;
for (int filterRow = -margin; filterRow <= margin; filterRow++){
for (int filterCol = -margin; filterCol <= margin; filterCol++){
int filterIdx = (filterRow + margin) * filterWidth + filterCol + margin;
int dx = min(width - 1, max(0, col + filterCol));
int dy = min(height - 1, max(0, row + filterRow));
int idx = dy * width + dx;
xSum += in[idx] * xFilter[filterIdx];
ySum += in[idx] * yFilter[filterIdx];
}
}
out[curIdx] = abs(xSum) + abs(ySum);
}
}
}
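// bottom-up dynamic programming: each cell stores its own importance plus the minimum cumulative
// importance of its three neighbours in the row below, so the top row ends up holding the cost of
// the cheapest seam passing through each column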
void getLeastImportantPixels (int *in, int width, int height, int *out){
int lastRow = (height - 1) * width;
memcpy(out + lastRow, in + lastRow, width * sizeof(int));
for (int row = height - 2; row >= 0; row--){
int below = row + 1;
for (int col = 0; col < width; col++ ){
int idx = row * width + col;
int leftCol = max(0, col - 1);
int rightCol = min(width - 1, col + 1);
int belowIdx = below * width + col;
int leftBelowIdx = below * width + leftCol;
int rightBelowIdx = below * width + rightCol;
out[idx] = min(out[belowIdx], min(out[leftBelowIdx], out[rightBelowIdx])) + in[idx];
}
}
}
void getSeamAt (int *in, int width, int height, int *out, int col){
out[0] = col;
for (int row = 1; row < height; row++){
int col = out[row - 1];
int idx = row * width + col;
int leftCol = max(0, col - 1);
int rightCol = min(width - 1, col + 1);
int leftIdx = row * width + leftCol;
int rightIdx = row * width + rightCol;
if (in[leftIdx] < in[idx]){
if (in[leftIdx] < in[rightIdx])
out[row] = leftCol;
else
out[row] = rightCol;
}
else{
if (in[idx] < in[rightIdx])
out[row] = col;
else
out[row] = rightCol;
}
}
}
void getLeastImportantSeam (int *in, int width, int height, int *out){
int minCol = 0;
for (int i = 0; i < width; i++){
if (in[i] < in[minCol])
minCol = i;
}
// printf("min col %d-%d\n", minCol, in[minCol]);
getSeamAt(in, width, height, out, minCol);
}
void removeSeam (const uchar3 *in, int width, int height, uchar3 *out, int *seam){
int newWidth = width - 1;
for (int row = 0; row < height; row++){
int col = seam[row];
memcpy(out + row * newWidth, in + row * width, col * sizeof(uchar3));
int nextIdxOut = row * newWidth + col;
int nextIdxIn = row * width + col + 1;
memcpy(out + nextIdxOut, in + nextIdxIn, (newWidth - col) * sizeof(uchar3));
}
}
void seamCarvingHost(const uchar3 *in, int width, int height, uchar3 *out, int *xFilter, int *yFilter, int filterWidth){
// convert image to grayscale
int *grayScalePixels = (int *)malloc(width * height * sizeof(int));
convertRgb2Gray(in, width * height, grayScalePixels);
// edge detection
int *pixelsImportance = (int *)malloc(width * height * sizeof(int));
getPixelsImportance(grayScalePixels, width, height, xFilter, yFilter, filterWidth, pixelsImportance);
// find the least important seam
int *leastPixelsImportance = (int *)malloc(width * height * sizeof(int));
getLeastImportantPixels(pixelsImportance, width, height, leastPixelsImportance);
int *leastImportantSeam = (int *)malloc(height * sizeof(int));
getLeastImportantSeam(leastPixelsImportance, width, height, leastImportantSeam);
// remove the least important seam
removeSeam(in, width, height, out, leastImportantSeam);
// free memories
free(grayScalePixels);
free(pixelsImportance);
free(leastPixelsImportance);
free(leastImportantSeam);
}
__global__ void convertRgb2GrayKernel(uchar3 *in, int width, int height, int *out){
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < width && row < height){
int idx = row * width + col;
out[idx] = 0.299f * in[idx].x + 0.587f * in[idx].y + 0.114f * in[idx].z;
}
}
__global__ void getPixelsImportanceKernel (int *in, int width, int height, int filterWidth, int *out){
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
if (col < width && row < height){
int margin = filterWidth / 2;
int curIdx = row * width + col;
float xSum = 0, ySum = 0;
for (int filterRow = -margin; filterRow <= margin; filterRow++){
for (int filterCol = -margin; filterCol <= margin; filterCol++){
int filterIdx = (filterRow + margin) * filterWidth + filterCol + margin;
int dx = min(width - 1, max(0, col + filterCol));
int dy = min(height - 1, max(0, row + filterRow));
int idx = dy * width + dx;
xSum += in[idx] * dc_xFilter[filterIdx];
ySum += in[idx] * dc_yFilter[filterIdx];
}
}
out[curIdx] = abs(xSum) + abs(ySum);
}
}
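// GPU version of the bottom-up DP: the table is processed in horizontal strips, each strip split
// into independent triangles. upTriangle lets one thread compute an upward-shrinking triangle of
// cells (every dependency lies inside the triangle or in the already finished row below it);
// downTriangle then fills the widening gaps left between neighbouring triangles.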
__global__ void upTriangle (int *in, int width, int height, int yStart, int yStop, int baseWith, int *out){
int xStart = baseWith * blockIdx.x * blockDim.x + threadIdx.x * baseWith;
int xStop = xStart + baseWith - 1;
for (int y = yStart; y >= yStop; y--){
for (int x = xStart; x <= xStop; x++){
if (x < width){
int idx = y * width + x;
int below = (y + 1) * width + x;
int left = (y + 1) * width + max(0, x - 1);
int right = (y + 1) * width + min(width - 1, x + 1);
out[idx] = in[idx] + min(out[below], min(out[left], out[right]));
}
}
xStart += 1;
xStop -= 1;
}
}
__global__ void downTriangle (int *in, int width, int height, int yStart, int yStop, int baseWith, int *out){
int xStop = baseWith * (threadIdx.x + blockDim.x * blockIdx.x);
int xStart = xStop - 1;
for (int y = yStart; y >= yStop; y--){
for (int x = xStart; x <= xStop; x++){
if (x >= 0 && x < width){
int idx = y * width + x;
int below = (y + 1) * width + x;
int left = (y + 1) * width + max(0, x - 1);
int right = (y + 1) * width + min(width - 1, x + 1);
out[idx] = in[idx] + min(out[below], min(out[left], out[right]));
}
}
xStart -= 1;
xStop += 1;
}
}
void seamCarvingDevice(const uchar3 *in, int width, int height, uchar3 *out, int *xFilter, int *yFilter, int filterWidth, dim3 blockSize, int baseWith){
// prepare some values
int lastRowIdx = (height - 1) * width;
int stripHeight = baseWith % 2 == 0 ? baseWith / 2 + 1 : (baseWith + 1) / 2 + 1;
int gridSizeTriangle = (width - 1) / (blockSize.x * baseWith) + 1;
size_t dataSize = width * height * sizeof(uchar3);
size_t rowSize = width * sizeof(int);
size_t grayScaleSize = width * height * sizeof(int);
dim3 gridSize((width - 1) / blockSize.x + 1, (height - 1) / blockSize.y + 1);
// allocate device memories
uchar3 *d_in;
int *d_grayScalePixels, *d_pixelsImportance, *d_leastImportantPixels;
CHECK(hipMalloc(&d_in, dataSize));
CHECK(hipMalloc(&d_grayScalePixels, grayScaleSize));
CHECK(hipMalloc(&d_pixelsImportance, grayScaleSize));
CHECK(hipMalloc(&d_leastImportantPixels, grayScaleSize));
// allocate host memories
int *leastPixelsImportance = (int *)malloc(grayScaleSize);
int *leastImportantSeam = (int *)malloc(height * sizeof(int));
// copy data to device memories
CHECK(hipMemcpy(d_in, in, dataSize, hipMemcpyHostToDevice));
// convert image to grayscale
hipLaunchKernelGGL(( convertRgb2GrayKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_in, width, height, d_grayScalePixels);
CHECK(hipGetLastError());
// edge detection
hipLaunchKernelGGL(( getPixelsImportanceKernel), dim3(gridSize), dim3(blockSize), 0, 0, d_grayScalePixels, width, height, filterWidth, d_pixelsImportance);
CHECK(hipGetLastError());
// find the least important pixels
CHECK(hipMemcpy(d_leastImportantPixels + lastRowIdx, d_pixelsImportance + lastRowIdx, rowSize, hipMemcpyDeviceToDevice));
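// walk the DP table bottom-up in strips of stripHeight rows, alternating the upTriangle and
// downTriangle kernels so the cells inside a strip can be computed without global synchronisation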
for (int y = height - 2; y >= 0; y -= stripHeight){
int yStart = y;
int yStop = max(0, yStart - stripHeight + 1);
hipLaunchKernelGGL(( upTriangle), dim3(gridSizeTriangle), dim3(blockSize.x), 0, 0, d_pixelsImportance, width, height, yStart, yStop, baseWith, d_leastImportantPixels);
yStart = max(0, yStart - 1);
yStop = max(0, yStart - stripHeight + 1);
hipLaunchKernelGGL(( downTriangle), dim3(gridSizeTriangle + 1), dim3(blockSize.x), 0, 0, d_pixelsImportance, width, height, yStart, yStop, baseWith, d_leastImportantPixels);
}
CHECK(hipMemcpy(leastPixelsImportance, d_leastImportantPixels, grayScaleSize, hipMemcpyDeviceToHost));
// find the least important seam
getLeastImportantSeam(leastPixelsImportance, width, height, leastImportantSeam);
// remove the least important seam
removeSeam(in, width, height, out, leastImportantSeam);
// free memories
CHECK(hipFree(d_in));
CHECK(hipFree(d_grayScalePixels));
CHECK(hipFree(d_pixelsImportance));
CHECK(hipFree(d_leastImportantPixels));
free(leastPixelsImportance);
free(leastImportantSeam);
}
void seamCarving(const uchar3 *in, int width, int height, uchar3 *out, int newWidth, int *xFilter, int *yFilter, int filterWidth, bool usingDevice=false, dim3 blockSize=dim3(1, 1), int baseWith = 0){
if (usingDevice == false){
printf("\nSeam carving by host\n");
}
else{
printf("\nSeam carving by device\n");
// copy x filter, y filter on host to dc_x filter, dc_y filter on device
size_t filterSize = filterWidth * filterWidth * sizeof(int);
CHECK(hipMemcpyToSymbol(dc_xFilter, xFilter, filterSize));
CHECK(hipMemcpyToSymbol(dc_yFilter, yFilter, filterSize));
}
GpuTimer timer;
timer.Start();
// allocate host memories
uchar3 *src = (uchar3 *)malloc(width * height * sizeof(uchar3));
uchar3 *dst = (uchar3 *)malloc(width * height * sizeof(uchar3));
// copy input data to src pointer
memcpy(src, in, width * height * sizeof(uchar3));
// do the seam carving by decreasing the width by 1 until newWidth is reached
for (int w = width; w > newWidth; w--){
// resize the dst pointer with current width - 1;
dst = (uchar3 *)realloc(dst, (w-1) * height * sizeof(uchar3));
// seamCarving the picture
if (usingDevice == false){
seamCarvingHost(src, w, height, dst, xFilter, yFilter, filterWidth);
}
else{
seamCarvingDevice(src, w, height, dst, xFilter, yFilter, filterWidth, blockSize, baseWith);
}
// swap src and dst
uchar3 * temp = src;
src = dst;
dst = temp;
}
// copy the output data to the out pointer
memcpy(out, src, newWidth * height * sizeof(uchar3));
// free memories (realloc may have moved the buffers, so free the current src/dst pointers)
free(src);
free(dst);
timer.Stop();
printf("Time: %.3f ms\n", timer.Eplapsed());
}
float computeError (uchar3 *a1, uchar3* a2, int n){
float err = 0;
for (int i = 0; i < n; i++){
err += abs((int)a1[i].x - (int)a2[i].x);
err += abs((int)a1[i].y - (int)a2[i].y);
err += abs((int)a1[i].z - (int)a2[i].z);
}
err /= (n * 3);
return err;
}
void printError (uchar3 *a1, uchar3 *a2, int width, int height){
float err = computeError(a1, a2, width * height);
printf("Error: %f\n", err);
}
void printDeviceInfo(int codeVer){
hipDeviceProp_t devProv;
CHECK(hipGetDeviceProperties(&devProv, 0));
printf("Vesrion of code: %d\n", codeVer);
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %lu bytes\n", devProv.totalGlobalMem);
printf("CMEM: %lu bytes\n", devProv.totalConstMem);
printf("L2 cache: %i bytes\n", devProv.l2CacheSize);
printf("SMEM / one SM: %lu bytes\n", devProv.sharedMemPerMultiprocessor);
printf("****************************\n");
}
char *concatStr(const char *s1, const char *s2){
char *result = (char *)malloc(strlen(s1) + strlen(s2) + 1);
strcpy(result, s1);
strcat(result, s2);
return result;
}
int main (int argc, char **argv){
if (argc != 4 && argc != 6){
printf("The number of arguments is invalid\n");
return EXIT_FAILURE;
}
int seamCount = atoi(argv[2]);
int baseWith = atoi(argv[3]);
// Read input image file
int width, height;
uchar3 *inPixels;
readRGBPnm(argv[1], width, height, inPixels);
printf("\nImage size (width * height): %i x %i\n", width, height);
int newWidth = width - seamCount;
if (newWidth <= 0){
printf("The count of removed seams must be smaller than the width of the image");
return EXIT_FAILURE;
}
printf("\nNew image size (width * height): %i x %i\n", newWidth, height);
// print device info
int codeVer = 1;
printDeviceInfo(codeVer);
// init out pointer
uchar3 *correctOutPixels = (uchar3 *)malloc(newWidth * height * sizeof(uchar3));
uchar3 *outPixels = (uchar3 *)malloc(newWidth * height * sizeof(uchar3));
// Set up x sobel filter and y sobel filter
int filterWidth = FILTER_WIDTH;
int *xFilter = (int *)malloc(filterWidth * filterWidth * sizeof(int));
int *yFilter = (int *)malloc(filterWidth * filterWidth * sizeof(int));
initSobelFilter(xFilter, true);
initSobelFilter(yFilter, false);
// Seam carving not using device
seamCarving(inPixels, width, height, correctOutPixels, newWidth, xFilter, yFilter, filterWidth);
// get input block size
dim3 blockSize(32, 32); //default
if (argc == 6){
blockSize.x = atoi(argv[4]);
blockSize.y = atoi(argv[5]);
}
// Seam carving using device
seamCarving(inPixels, width, height, outPixels, newWidth, xFilter, yFilter, filterWidth, true, blockSize, baseWith);
printError(correctOutPixels, outPixels, newWidth, height);
// Write results to files
char *outFileNameBase = strtok(argv[1], "."); //get rid of extension
writeRGBPnm(correctOutPixels, newWidth, height, concatStr(outFileNameBase, "_host.pnm"));
writeRGBPnm(outPixels, newWidth, height, concatStr(outFileNameBase, "_device.pnm"));
// Free memories
free(inPixels);
free(xFilter);
free(yFilter);
free(correctOutPixels);
free(outPixels);
}
| 7e618ecf5ce182d0c138641f3d423d8587747558.cu | #include <stdio.h>
#include <stdint.h>
#include <stdlib.h>
#include <algorithm>
#define FILTER_WIDTH 3
__constant__ int dc_xFilter[FILTER_WIDTH * FILTER_WIDTH];
__constant__ int dc_yFilter[FILTER_WIDTH * FILTER_WIDTH];
#define CHECK(call){\
const cudaError_t error = call;\
if (error != cudaSuccess){\
fprintf(stderr, "Error: %s:%d, ", __FILE__, __LINE__);\
fprintf(stderr, "code: %d, reason: %s\n", error, cudaGetErrorString(error));\
exit(EXIT_FAILURE);\
}\
}
struct GpuTimer{
cudaEvent_t start;
cudaEvent_t stop;
GpuTimer(){
cudaEventCreate(&start);
cudaEventCreate(&stop);
}
~GpuTimer(){
cudaEventDestroy(start);
cudaEventDestroy(stop);
}
void Start(){
cudaEventRecord(start, 0);
cudaEventSynchronize(start);
}
void Stop(){
cudaEventRecord(stop, 0);
}
float Eplapsed(){
float eplapsed;
cudaEventSynchronize(stop);
cudaEventElapsedTime(&eplapsed, start, stop);
return eplapsed;
}
};
void readRGBPnm (char *fileName, int &width, int &height, uchar3 *&pixels){
FILE *f = fopen(fileName, "r");
if (f == NULL){
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
char type[3];
fscanf(f, "%s", type);
// Check the type of input img
if (strcmp(type, "P3") != 0){
fclose(f);
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
fscanf(f, "%i", &width);
fscanf(f, "%i", &height);
int maxVal;
fscanf(f, "%i", &maxVal);
// Assume 1 byte per value
if (maxVal > 255){
fclose(f);
printf("Cannot read %s\n", fileName);
exit(EXIT_FAILURE);
}
pixels = (uchar3 *)malloc(width * height * sizeof(uchar3));
for (int i = 0; i< width * height; i++){
fscanf(f, "%hhu%hhu%hhu", &pixels[i].x, &pixels[i].y, &pixels[i].z);
}
fclose(f);
}
void writeRGBPnm (const uchar3 *pixels, int width, int height, char *fileName){
FILE *f = fopen(fileName, "w");
if (f == NULL){
printf("Cannot write %s\n", fileName);
exit(EXIT_FAILURE);
}
fprintf(f, "P3\n%i\n%i\n255\n", width, height);
for (int i = 0; i < width * height; i++){
fprintf(f, "%hhu\n%hhu\n%hhu\n", pixels[i].x, pixels[i].y, pixels[i].z);
}
fclose(f);
}
void writeGrayScalePnm (int *pixels, int width, int height, char *fileName){
FILE *f = fopen(fileName, "w");
if (f == NULL){
printf("Cannot write %s\n", fileName);
exit(EXIT_FAILURE);
}
fprintf(f, "P2\n%i\n%i\n255\n", width, height);
for (int i = 0; i < width * height; i++){
fprintf(f, "%hhu\n", pixels[i]);
}
fclose(f);
}
void writeMatrixTxt (int *pixels, int width, int height, char *fileName){
FILE *f = fopen(fileName, "w");
if (f == NULL){
printf("Cannot write %s\n", fileName);
exit(EXIT_FAILURE);
}
for (int i = 0; i < height; i++){
for (int j = 0; j < width; j++){
fprintf(f, "%d ", pixels[i * width + j]);
}
fprintf(f, "\n");
}
fclose(f);
}
void initSobelFilter(int *filter, bool horizontal){
int filterWidth = FILTER_WIDTH;
int val = 0;
int margin = filterWidth / 2;
for (int filterR = 0; filterR < filterWidth; filterR++){
for (int filterC = 0; filterC < filterWidth; filterC++){
if (horizontal == true){
if (filterC < margin){
val = 1;
}
else if (filterC == margin){
val = 0;
}
else{
val = -1;
}
if (filterR == margin){
val *= 2;
}
}
else{
if (filterR < margin){
val = 1;
}
else if (filterR == margin){
val = 0;
}
else{
val = -1;
}
if (filterC == margin){
val *= 2;
}
}
filter[filterR * filterWidth + filterC] = val;
}
}
}
void convertRgb2Gray (const uchar3 *in, int n, int *out){
for (int i = 0; i < n; i++){
out[i] = 0.299f * in[i].x + 0.587f * in[i].y + 0.114f * in[i].z;
}
}
void getPixelsImportance (int *in, int width, int height, int *xFilter, int *yFilter, int filterWidth, int *out){
int margin = filterWidth / 2;
for (int col = 0; col < width; col++){
for (int row = 0; row < height; row++){
int curIdx = row * width + col;
float xSum = 0, ySum = 0;
for (int filterRow = -margin; filterRow <= margin; filterRow++){
for (int filterCol = -margin; filterCol <= margin; filterCol++){
int filterIdx = (filterRow + margin) * filterWidth + filterCol + margin;
int dx = min(width - 1, max(0, col + filterCol));
int dy = min(height - 1, max(0, row + filterRow));
int idx = dy * width + dx;
xSum += in[idx] * xFilter[filterIdx];
ySum += in[idx] * yFilter[filterIdx];
}
}
out[curIdx] = abs(xSum) + abs(ySum);
}
}
}
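// bottom-up dynamic programming: each cell stores its own importance plus the minimum cumulative
// importance of its three neighbours in the row below, so the top row ends up holding the cost of
// the cheapest seam passing through each column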
void getLeastImportantPixels (int *in, int width, int height, int *out){
int lastRow = (height - 1) * width;
memcpy(out + lastRow, in + lastRow, width * sizeof(int));
for (int row = height - 2; row >= 0; row--){
int below = row + 1;
for (int col = 0; col < width; col++ ){
int idx = row * width + col;
int leftCol = max(0, col - 1);
int rightCol = min(width - 1, col + 1);
int belowIdx = below * width + col;
int leftBelowIdx = below * width + leftCol;
int rightBelowIdx = below * width + rightCol;
out[idx] = min(out[belowIdx], min(out[leftBelowIdx], out[rightBelowIdx])) + in[idx];
}
}
}
void getSeamAt (int *in, int width, int height, int *out, int col){
out[0] = col;
for (int row = 1; row < height; row++){
int col = out[row - 1];
int idx = row * width + col;
int leftCol = max(0, col - 1);
int rightCol = min(width - 1, col + 1);
int leftIdx = row * width + leftCol;
int rightIdx = row * width + rightCol;
if (in[leftIdx] < in[idx]){
if (in[leftIdx] < in[rightIdx])
out[row] = leftCol;
else
out[row] = rightCol;
}
else{
if (in[idx] < in[rightIdx])
out[row] = col;
else
out[row] = rightCol;
}
}
}
void getLeastImportantSeam (int *in, int width, int height, int *out){
int minCol = 0;
for (int i = 0; i < width; i++){
if (in[i] < in[minCol])
minCol = i;
}
// printf("min col %d-%d\n", minCol, in[minCol]);
getSeamAt(in, width, height, out, minCol);
}
void removeSeam (const uchar3 *in, int width, int height, uchar3 *out, int *seam){
int newWidth = width - 1;
for (int row = 0; row < height; row++){
int col = seam[row];
memcpy(out + row * newWidth, in + row * width, col * sizeof(uchar3));
int nextIdxOut = row * newWidth + col;
int nextIdxIn = row * width + col + 1;
memcpy(out + nextIdxOut, in + nextIdxIn, (newWidth - col) * sizeof(uchar3));
}
}
void seamCarvingHost(const uchar3 *in, int width, int height, uchar3 *out, int *xFilter, int *yFilter, int filterWidth){
// convert image to grayscale
int *grayScalePixels = (int *)malloc(width * height * sizeof(int));
convertRgb2Gray(in, width * height, grayScalePixels);
// edge detection
int *pixelsImportance = (int *)malloc(width * height * sizeof(int));
getPixelsImportance(grayScalePixels, width, height, xFilter, yFilter, filterWidth, pixelsImportance);
// find the least important seam
int *leastPixelsImportance = (int *)malloc(width * height * sizeof(int));
getLeastImportantPixels(pixelsImportance, width, height, leastPixelsImportance);
int *leastImportantSeam = (int *)malloc(height * sizeof(int));
getLeastImportantSeam(leastPixelsImportance, width, height, leastImportantSeam);
// remove the least important seam
removeSeam(in, width, height, out, leastImportantSeam);
// free memories
free(grayScalePixels);
free(pixelsImportance);
free(leastPixelsImportance);
free(leastImportantSeam);
}
__global__ void convertRgb2GrayKernel(uchar3 *in, int width, int height, int *out){
int col = blockIdx.x * blockDim.x + threadIdx.x;
int row = blockIdx.y * blockDim.y + threadIdx.y;
if (col < width && row < height){
int idx = row * width + col;
out[idx] = 0.299f * in[idx].x + 0.587f * in[idx].y + 0.114f * in[idx].z;
}
}
__global__ void getPixelsImportanceKernel (int *in, int width, int height, int filterWidth, int *out){
int col = blockDim.x * blockIdx.x + threadIdx.x;
int row = blockDim.y * blockIdx.y + threadIdx.y;
if (col < width && row < height){
int margin = filterWidth / 2;
int curIdx = row * width + col;
float xSum = 0, ySum = 0;
for (int filterRow = -margin; filterRow <= margin; filterRow++){
for (int filterCol = -margin; filterCol <= margin; filterCol++){
int filterIdx = (filterRow + margin) * filterWidth + filterCol + margin;
int dx = min(width - 1, max(0, col + filterCol));
int dy = min(height - 1, max(0, row + filterRow));
int idx = dy * width + dx;
xSum += in[idx] * dc_xFilter[filterIdx];
ySum += in[idx] * dc_yFilter[filterIdx];
}
}
out[curIdx] = abs(xSum) + abs(ySum);
}
}
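// GPU version of the bottom-up DP: the table is processed in horizontal strips, each strip split
// into independent triangles. upTriangle lets one thread compute an upward-shrinking triangle of
// cells (every dependency lies inside the triangle or in the already finished row below it);
// downTriangle then fills the widening gaps left between neighbouring triangles.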
__global__ void upTriangle (int *in, int width, int height, int yStart, int yStop, int baseWith, int *out){
int xStart = baseWith * blockIdx.x * blockDim.x + threadIdx.x * baseWith;
int xStop = xStart + baseWith - 1;
for (int y = yStart; y >= yStop; y--){
for (int x = xStart; x <= xStop; x++){
if (x < width){
int idx = y * width + x;
int below = (y + 1) * width + x;
int left = (y + 1) * width + max(0, x - 1);
int right = (y + 1) * width + min(width - 1, x + 1);
out[idx] = in[idx] + min(out[below], min(out[left], out[right]));
}
}
xStart += 1;
xStop -= 1;
}
}
__global__ void downTriangle (int *in, int width, int height, int yStart, int yStop, int baseWith, int *out){
int xStop = baseWith * (threadIdx.x + blockDim.x * blockIdx.x);
int xStart = xStop - 1;
for (int y = yStart; y >= yStop; y--){
for (int x = xStart; x <= xStop; x++){
if (x >= 0 && x < width){
int idx = y * width + x;
int below = (y + 1) * width + x;
int left = (y + 1) * width + max(0, x - 1);
int right = (y + 1) * width + min(width - 1, x + 1);
out[idx] = in[idx] + min(out[below], min(out[left], out[right]));
}
}
xStart -= 1;
xStop += 1;
}
}
void seamCarvingDevice(const uchar3 *in, int width, int height, uchar3 *out, int *xFilter, int *yFilter, int filterWidth, dim3 blockSize, int baseWith){
// prepare some values
int lastRowIdx = (height - 1) * width;
int stripHeight = baseWith % 2 == 0 ? baseWith / 2 + 1 : (baseWith + 1) / 2 + 1;
int gridSizeTriangle = (width - 1) / (blockSize.x * baseWith) + 1;
size_t dataSize = width * height * sizeof(uchar3);
size_t rowSize = width * sizeof(int);
size_t grayScaleSize = width * height * sizeof(int);
dim3 gridSize((width - 1) / blockSize.x + 1, (height - 1) / blockSize.y + 1);
// allocate device memories
uchar3 *d_in;
int *d_grayScalePixels, *d_pixelsImportance, *d_leastImportantPixels;
CHECK(cudaMalloc(&d_in, dataSize));
CHECK(cudaMalloc(&d_grayScalePixels, grayScaleSize));
CHECK(cudaMalloc(&d_pixelsImportance, grayScaleSize));
CHECK(cudaMalloc(&d_leastImportantPixels, grayScaleSize));
// allocate host memories
int *leastPixelsImportance = (int *)malloc(grayScaleSize);
int *leastImportantSeam = (int *)malloc(height * sizeof(int));
// copy data to device memories
CHECK(cudaMemcpy(d_in, in, dataSize, cudaMemcpyHostToDevice));
// convert image to grayscale
convertRgb2GrayKernel<<<gridSize, blockSize>>>(d_in, width, height, d_grayScalePixels);
CHECK(cudaGetLastError());
// edge detection
getPixelsImportanceKernel<<<gridSize, blockSize>>>(d_grayScalePixels, width, height, filterWidth, d_pixelsImportance);
CHECK(cudaGetLastError());
// find the least important pixels
CHECK(cudaMemcpy(d_leastImportantPixels + lastRowIdx, d_pixelsImportance + lastRowIdx, rowSize, cudaMemcpyDeviceToDevice));
for (int y = height - 2; y >= 0; y -= stripHeight){
int yStart = y;
int yStop = max(0, yStart - stripHeight + 1);
upTriangle<<<gridSizeTriangle, blockSize.x>>>(d_pixelsImportance, width, height, yStart, yStop, baseWith, d_leastImportantPixels);
yStart = max(0, yStart - 1);
yStop = max(0, yStart - stripHeight + 1);
downTriangle<<<gridSizeTriangle + 1, blockSize.x>>>(d_pixelsImportance, width, height, yStart, yStop, baseWith, d_leastImportantPixels);
}
CHECK(cudaMemcpy(leastPixelsImportance, d_leastImportantPixels, grayScaleSize, cudaMemcpyDeviceToHost));
// find the least important seam
getLeastImportantSeam(leastPixelsImportance, width, height, leastImportantSeam);
// remove the least important seam
removeSeam(in, width, height, out, leastImportantSeam);
// free memories
CHECK(cudaFree(d_in));
CHECK(cudaFree(d_grayScalePixels));
CHECK(cudaFree(d_pixelsImportance));
CHECK(cudaFree(d_leastImportantPixels));
free(leastPixelsImportance);
free(leastImportantSeam);
}
void seamCarving(const uchar3 *in, int width, int height, uchar3 *out, int newWidth, int *xFilter, int *yFilter, int filterWidth, bool usingDevice=false, dim3 blockSize=dim3(1, 1), int baseWith = 0){
if (usingDevice == false){
printf("\nSeam carving by host\n");
}
else{
printf("\nSeam carving by device\n");
// copy x filter, y filter on host to dc_x filter, dc_y filter on device
size_t filterSize = filterWidth * filterWidth * sizeof(int);
CHECK(cudaMemcpyToSymbol(dc_xFilter, xFilter, filterSize));
CHECK(cudaMemcpyToSymbol(dc_yFilter, yFilter, filterSize));
}
GpuTimer timer;
timer.Start();
// allocate host memories
uchar3 *src = (uchar3 *)malloc(width * height * sizeof(uchar3));
uchar3 *dst = (uchar3 *)malloc(width * height * sizeof(uchar3));
// copy input data to src pointer
memcpy(src, in, width * height * sizeof(uchar3));
// do the seam carving by decreasing the width by 1 until newWidth is reached
for (int w = width; w > newWidth; w--){
// resize the dst pointer with current width - 1;
dst = (uchar3 *)realloc(dst, (w-1) * height * sizeof(uchar3));
// seamCarving the picture
if (usingDevice == false){
seamCarvingHost(src, w, height, dst, xFilter, yFilter, filterWidth);
}
else{
seamCarvingDevice(src, w, height, dst, xFilter, yFilter, filterWidth, blockSize, baseWith);
}
// swap src and dst
uchar3 * temp = src;
src = dst;
dst = temp;
}
// copy the output data to the out pointer
memcpy(out, src, newWidth * height * sizeof(uchar3));
// free memories (realloc may have moved the buffers, so free the current src/dst pointers)
free(src);
free(dst);
timer.Stop();
printf("Time: %.3f ms\n", timer.Eplapsed());
}
float computeError (uchar3 *a1, uchar3* a2, int n){
float err = 0;
for (int i = 0; i < n; i++){
err += abs((int)a1[i].x - (int)a2[i].x);
err += abs((int)a1[i].y - (int)a2[i].y);
err += abs((int)a1[i].z - (int)a2[i].z);
}
err /= (n * 3);
return err;
}
void printError (uchar3 *a1, uchar3 *a2, int width, int height){
float err = computeError(a1, a2, width * height);
printf("Error: %f\n", err);
}
void printDeviceInfo(int codeVer){
cudaDeviceProp devProv;
CHECK(cudaGetDeviceProperties(&devProv, 0));
printf("Vesrion of code: %d\n", codeVer);
printf("**********GPU info**********\n");
printf("Name: %s\n", devProv.name);
printf("Compute capability: %d.%d\n", devProv.major, devProv.minor);
printf("Num SMs: %d\n", devProv.multiProcessorCount);
printf("Max num threads per SM: %d\n", devProv.maxThreadsPerMultiProcessor);
printf("Max num warps per SM: %d\n", devProv.maxThreadsPerMultiProcessor / devProv.warpSize);
printf("GMEM: %lu bytes\n", devProv.totalGlobalMem);
printf("CMEM: %lu bytes\n", devProv.totalConstMem);
printf("L2 cache: %i bytes\n", devProv.l2CacheSize);
printf("SMEM / one SM: %lu bytes\n", devProv.sharedMemPerMultiprocessor);
printf("****************************\n");
}
char *concatStr(const char *s1, const char *s2){
char *result = (char *)malloc(strlen(s1) + strlen(s2) + 1);
strcpy(result, s1);
strcat(result, s2);
return result;
}
int main (int argc, char **argv){
if (argc != 4 && argc != 6){
printf("The number of arguments is invalid\n");
return EXIT_FAILURE;
}
int seamCount = atoi(argv[2]);
int baseWith = atoi(argv[3]);
// Read input image file
int width, height;
uchar3 *inPixels;
readRGBPnm(argv[1], width, height, inPixels);
printf("\nImage size (width * height): %i x %i\n", width, height);
int newWidth = width - seamCount;
if (newWidth <= 0){
printf("The count of removed seams must be smaller than the width of the image");
return EXIT_FAILURE;
}
printf("\nNew image size (width * height): %i x %i\n", newWidth, height);
// print device info
int codeVer = 1;
printDeviceInfo(codeVer);
// init out pointer
uchar3 *correctOutPixels = (uchar3 *)malloc(newWidth * height * sizeof(uchar3));
uchar3 *outPixels = (uchar3 *)malloc(newWidth * height * sizeof(uchar3));
// Set up x sobel filter and y sobel filter
int filterWidth = FILTER_WIDTH;
int *xFilter = (int *)malloc(filterWidth * filterWidth * sizeof(int));
int *yFilter = (int *)malloc(filterWidth * filterWidth * sizeof(int));
initSobelFilter(xFilter, true);
initSobelFilter(yFilter, false);
// Seam carving not using device
seamCarving(inPixels, width, height, correctOutPixels, newWidth, xFilter, yFilter, filterWidth);
// get input block size
dim3 blockSize(32, 32); //default
if (argc == 6){
blockSize.x = atoi(argv[4]);
blockSize.y = atoi(argv[5]);
}
// Seam carving using device
seamCarving(inPixels, width, height, outPixels, newWidth, xFilter, yFilter, filterWidth, true, blockSize, baseWith);
printError(correctOutPixels, outPixels, newWidth, height);
// Write results to files
char *outFileNameBase = strtok(argv[1], "."); //get rid of extension
writeRGBPnm(correctOutPixels, newWidth, height, concatStr(outFileNameBase, "_host.pnm"));
writeRGBPnm(outPixels, newWidth, height, concatStr(outFileNameBase, "_device.pnm"));
// Free memories
free(inPixels);
free(xFilter);
free(yFilter);
free(correctOutPixels);
free(outPixels);
}
|
f70cddbbcc9e6b5c576f8d7d9e2f771c0ade31a6.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#if VECTOR_TYPE == 1
typedef float vector;
#elif VECTOR_TYPE == 2
typedef float2 vector;
#elif VECTOR_TYPE == 4
typedef float4 vector;
#endif // VECTOR_TYPE
inline __device__ float2 make_float2(float s)
{
return make_float2(s, s);
}
inline __device__ float4 make_float4(float s)
{
return make_float4(s, s, s, s);
}
inline __device__ float2 operator+(float2 a, float2 b)
{
return make_float2(a.x + b.x, a.y + b.y);
}
inline __device__ float4 operator+(float4 a, float4 b)
{
return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
inline __device__ float2 operator+(float2 a, float b)
{
return make_float2(a.x + b, a.y + b);
}
inline __device__ float4 operator+(float4 a, float b)
{
return make_float4(a.x + b, a.y + b, a.z + b, a.w + b);
}
inline __host__ __device__ void operator+=(float2 &a, float2 b)
{
a.x += b.x;
a.y += b.y;
}
inline __host__ __device__ void operator+=(float4 &a, float4 b)
{
a.x += b.x;
a.y += b.y;
a.z += b.z;
a.w += b.w;
}
inline __device__ float2 operator-(float2 a, float2 b)
{
return make_float2(a.x - b.x, a.y - b.y);
}
inline __device__ float4 operator-(float4 a, float4 b)
{
return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
}
inline __device__ float2 operator-(float2 a, float b)
{
return make_float2(a.x - b, a.y - b);
}
inline __device__ float4 operator-(float4 a, float b)
{
return make_float4(a.x - b, a.y - b, a.z - b, a.w - b);
}
inline __device__ float2 operator*(float2 a, float2 b)
{
return make_float2(a.x * b.x, a.y * b.y);
}
inline __device__ float4 operator*(float4 a, float4 b)
{
return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w);
}
inline __device__ float2 operator*(float2 a, float b)
{
return make_float2(a.x * b, a.y * b);
}
inline __device__ float4 operator*(float4 a, float b)
{
return make_float4(a.x * b, a.y * b, a.z * b, a.w * b);
}
inline __device__ float2 operator*(float b, float2 a)
{
return make_float2(b * a.x, b * a.y);
}
inline __device__ float4 operator*(float b, float4 a)
{
return make_float4(b * a.x, b * a.y, b * a.z, b * a.w);
}
inline __device__ float2 rsqrtf(float2 x){
return make_float2(rsqrtf(x.x), rsqrtf(x.y));
}
inline __device__ float4 rsqrtf(float4 x){
return make_float4(rsqrtf(x.x), rsqrtf(x.y), rsqrtf(x.z), rsqrtf(x.w));
}
// method to calculate acceleration caused by body J
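// softened gravitational pull of body J on body I: a_I += m_J * d / (|d|^2 + eps)^(3/2), d = r_J - r_I;
// in the SoA path the arithmetic runs on float2/float4 lanes, so one call handles VECTOR_TYPE bodies J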
__device__ void updateAcc(vector bodyAcc[3], float bodyPos[3], // position of body I
vector bufferPosX, vector bufferPosY, vector bufferPosZ, vector bufferMass, // position and mass of body J
float softeningSqr) // to avoid infinities and zero division
{
#if USE_SOA == 0
{
float3 d;
d.x = bufferPosX - bodyPos[0];
d.y = bufferPosY - bodyPos[1];
d.z = bufferPosZ - bodyPos[2];
float distSqr = (d.x * d.x) + (d.y * d.y) + (d.z * d.z) + softeningSqr;
float invDist = rsqrtf(distSqr);
float f = bufferMass * invDist * invDist * invDist;
bodyAcc[0] += d.x * f;
bodyAcc[1] += d.y * f;
bodyAcc[2] += d.z * f;
}
#else // USE_SOA != 0
{
vector distanceX = bufferPosX - bodyPos[0];
vector distanceY = bufferPosY - bodyPos[1];
vector distanceZ = bufferPosZ - bodyPos[2];
vector invDist = rsqrtf(distanceX * distanceX + distanceY * distanceY + distanceZ * distanceZ + softeningSqr);
vector f = bufferMass * invDist * invDist * invDist;
bodyAcc[0] += distanceX * f;
bodyAcc[1] += distanceY * f;
bodyAcc[2] += distanceZ * f;
}
#endif // USE_SOA == 0
}
// method to calculate acceleration caused by body J
__device__ void updateAccGM(vector bodyAcc[3],
float bodyPos[3], // position of body I
float4* oldBodyInfo, // data; [X,Y,Z,mass]
vector* oldPosX,
vector* oldPosY,
vector* oldPosZ,
vector* mass,
int index,
float softeningSqr) // to avoid infinities and zero division
{
#if USE_SOA == 0
{
updateAcc(bodyAcc, bodyPos,
oldBodyInfo[index].x,
oldBodyInfo[index].y,
oldBodyInfo[index].z,
oldBodyInfo[index].w,
softeningSqr);
}
#else // USE_SOA != 0
{
updateAcc(bodyAcc, bodyPos,
oldPosX[index],
oldPosY[index],
oldPosZ[index],
mass[index],
softeningSqr);
}
#endif // USE_SOA == 0
}
// method to load thread specific data from memory
__device__ void loadThreadData(
float4* oldBodyInfo, // data; [X,Y,Z,mass]
float* oldPosX,
float* oldPosY,
float* oldPosZ,
float* mass,
float4* oldVel, // velocity info
float* oldVelX,
float* oldVelY,
float* oldVelZ,
float bodyPos[][3],
float bodyVel[][3],
float* bodyMass) // thread data
{
int index = (blockIdx.x*blockDim.x + threadIdx.x) * OUTER_UNROLL_FACTOR;
#if INNER_UNROLL_FACTOR2 > 0
# pragma unroll INNER_UNROLL_FACTOR2
#endif
for (int j = 0; j < OUTER_UNROLL_FACTOR; ++j) {
#if USE_SOA == 0
{
// store 'thread specific' body info to registers
bodyPos[j][0] = oldBodyInfo[index + j].x;
bodyPos[j][1] = oldBodyInfo[index + j].y;
bodyPos[j][2] = oldBodyInfo[index + j].z;
bodyVel[j][0] = oldVel[index + j].x;
bodyVel[j][1] = oldVel[index + j].y;
bodyVel[j][2] = oldVel[index + j].z;
bodyMass[j] = oldBodyInfo[index + j].w;
}
#else // USE_SOA != 0
{
// store 'thread specific' body info to registers
bodyPos[j][0] = oldPosX[index + j];
bodyPos[j][1] = oldPosY[index + j];
bodyPos[j][2] = oldPosZ[index + j];
bodyVel[j][0] = oldVelX[index + j];
bodyVel[j][1] = oldVelY[index + j];
bodyVel[j][2] = oldVelZ[index + j];
bodyMass[j] = mass[index + j];
}
#endif // USE_SOA == 0
}
}
// method will copy one item (X, Y, Z, mass) from input data to buffers
__device__ void fillBuffers(
float4* oldBodyInfo, // global (input) data; [X,Y,Z,mass]
vector* oldPosX,
vector* oldPosY,
vector* oldPosZ,
vector* mass,
vector bufferPosX[WORK_GROUP_SIZE_X], // buffers
vector bufferPosY[WORK_GROUP_SIZE_X],
vector bufferPosZ[WORK_GROUP_SIZE_X],
vector bufferMass[WORK_GROUP_SIZE_X],
int offset)
{
int tid = threadIdx.x;
#if USE_SOA == 0
{
bufferPosX[tid] = oldBodyInfo[offset + tid].x;
bufferPosY[tid] = oldBodyInfo[offset + tid].y;
bufferPosZ[tid] = oldBodyInfo[offset + tid].z;
bufferMass[tid] = oldBodyInfo[offset + tid].w;
}
#else // USE_SOA != 0
{
bufferPosX[tid] = oldPosX[offset + tid];
bufferPosY[tid] = oldPosY[offset + tid];
bufferPosZ[tid] = oldPosZ[offset + tid];
bufferMass[tid] = mass[offset + tid];
}
#endif // USE_SOA == 0
}
// kernel calculating new position and velocity for n-bodies
#if VECTOR_SIZE > 1
__attribute__((vec_type_hint(vector)))
#endif
__global__ void nbody_kernel(float timeDelta,
float4* oldBodyInfo, // pos XYZ, mass
vector* oldPosX,
vector* oldPosY,
vector* oldPosZ,
vector* mass,
float4* newBodyInfo,
float4* oldVel, // XYZ, W unused
vector* oldVelX,
vector* oldVelY,
vector* oldVelZ,
float4* newVel, // XYZ, W set to 0.f
float damping,
float softeningSqr,
int n)
{
// buffers for bodies info processed by the work group
__shared__ vector bufferPosX[WORK_GROUP_SIZE_X];
__shared__ vector bufferPosY[WORK_GROUP_SIZE_X];
__shared__ vector bufferPosZ[WORK_GROUP_SIZE_X];
__shared__ vector bufferMass[WORK_GROUP_SIZE_X];
// each thread holds a position/mass of the body it represents
float bodyPos[OUTER_UNROLL_FACTOR][3];
float bodyVel[OUTER_UNROLL_FACTOR][3];
vector bodyAcc[OUTER_UNROLL_FACTOR][3];
float bodyMass[OUTER_UNROLL_FACTOR];
// clear acceleration
#if INNER_UNROLL_FACTOR2 > 0
# pragma unroll INNER_UNROLL_FACTOR2
#endif
for (int j = 0; j < OUTER_UNROLL_FACTOR; ++j) {
#if VECTOR_TYPE == 1
bodyAcc[j][0] = bodyAcc[j][1] = bodyAcc[j][2] = .0f;
#elif VECTOR_TYPE == 2
bodyAcc[j][0] = bodyAcc[j][1] = bodyAcc[j][2] = make_float2(.0f);
#elif VECTOR_TYPE == 4
bodyAcc[j][0] = bodyAcc[j][1] = bodyAcc[j][2] = make_float4(.0f);
#endif
}
// load data
loadThreadData(oldBodyInfo, ( float*)oldPosX, ( float*)oldPosY, ( float*)oldPosZ, ( float*)mass,
oldVel, ( float*)oldVelX, ( float*)oldVelY, ( float*)oldVelZ, // velocity
bodyPos, bodyVel, bodyMass); // values to be filled
int blocks = n / (WORK_GROUP_SIZE_X * VECTOR_TYPE); // each outer iteration accumulates the effect of WORK_GROUP_SIZE_X * VECTOR_TYPE bodies on this thread's bodies
// start the calculation, process whole blocks
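// tiled interaction loop: with LOCAL_MEM == 1 each block first stages WORK_GROUP_SIZE_X bodies in
// shared memory, every thread accumulates the acceleration from the whole tile, and the block
// synchronises before moving on to the next tile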
for (int i = 0; i < blocks; i++) {
#if LOCAL_MEM == 1
// load new values to buffer.
// We know that all threads can be used now, so no condition is necessary
fillBuffers(oldBodyInfo, oldPosX, oldPosY, oldPosZ, mass, bufferPosX, bufferPosY, bufferPosZ, bufferMass, i * WORK_GROUP_SIZE_X);
__syncthreads();
#endif // LOCAL_MEM == 1
// calculate the acceleration between the thread body and each other body loaded to buffer
#if INNER_UNROLL_FACTOR1 > 0
# pragma unroll INNER_UNROLL_FACTOR1
#endif
for(int index = 0; index < WORK_GROUP_SIZE_X; index++) {
#if INNER_UNROLL_FACTOR2 > 0
# pragma unroll INNER_UNROLL_FACTOR2
#endif
for (int j = 0; j < OUTER_UNROLL_FACTOR; ++j) {
#if LOCAL_MEM == 1
updateAcc(bodyAcc[j], bodyPos[j],
bufferPosX[index], bufferPosY[index], bufferPosZ[index], bufferMass[index],
softeningSqr);
#else // LOCAL_MEM != 1
updateAccGM(bodyAcc[j], bodyPos[j],
oldBodyInfo, oldPosX, oldPosY, oldPosZ, mass,
i * WORK_GROUP_SIZE_X + index,
softeningSqr);
#endif // LOCAL_MEM == 1
}
}
#if LOCAL_MEM == 1
__syncthreads(); // sync threads
#endif
}
// sum elements of acceleration vector, if any
float resAccX, resAccY, resAccZ;
int index = (blockIdx.x*blockDim.x + threadIdx.x) * OUTER_UNROLL_FACTOR;
#if INNER_UNROLL_FACTOR2 > 0
# pragma unroll INNER_UNROLL_FACTOR2
#endif
for (int j = 0; j < OUTER_UNROLL_FACTOR; ++j) {
resAccX = resAccY = resAccZ = 0.f;
for (int i = 0; i < VECTOR_TYPE; i++)
{
resAccX += ((float*)&bodyAcc[j][0])[i];
resAccY += ((float*)&bodyAcc[j][1])[i];
resAccZ += ((float*)&bodyAcc[j][2])[i];
}
// 'export' result
// calculate resulting position
float resPosX = bodyPos[j][0] + timeDelta * bodyVel[j][0] + damping * timeDelta * timeDelta * resAccX;
float resPosY = bodyPos[j][1] + timeDelta * bodyVel[j][1] + damping * timeDelta * timeDelta * resAccY;
float resPosZ = bodyPos[j][2] + timeDelta * bodyVel[j][2] + damping * timeDelta * timeDelta * resAccZ;
newBodyInfo[index + j] = make_float4(resPosX, resPosY, resPosZ, bodyMass[j]);
// calculate resulting velocity
float resVelX = bodyVel[j][0] + timeDelta * resAccX;
float resVelY = bodyVel[j][1] + timeDelta * resAccY;
float resVelZ = bodyVel[j][2] + timeDelta * resAccZ;
newVel[index + j] = make_float4(resVelX, resVelY, resVelZ, 0.f);
}
}
| f70cddbbcc9e6b5c576f8d7d9e2f771c0ade31a6.cu | #if VECTOR_TYPE == 1
typedef float vector;
#elif VECTOR_TYPE == 2
typedef float2 vector;
#elif VECTOR_TYPE == 4
typedef float4 vector;
#endif // VECTOR_TYPE
inline __device__ float2 make_float2(float s)
{
return make_float2(s, s);
}
inline __device__ float4 make_float4(float s)
{
return make_float4(s, s, s, s);
}
inline __device__ float2 operator+(float2 a, float2 b)
{
return make_float2(a.x + b.x, a.y + b.y);
}
inline __device__ float4 operator+(float4 a, float4 b)
{
return make_float4(a.x + b.x, a.y + b.y, a.z + b.z, a.w + b.w);
}
inline __device__ float2 operator+(float2 a, float b)
{
return make_float2(a.x + b, a.y + b);
}
inline __device__ float4 operator+(float4 a, float b)
{
return make_float4(a.x + b, a.y + b, a.z + b, a.w + b);
}
inline __host__ __device__ void operator+=(float2 &a, float2 b)
{
a.x += b.x;
a.y += b.y;
}
inline __host__ __device__ void operator+=(float4 &a, float4 b)
{
a.x += b.x;
a.y += b.y;
a.z += b.z;
a.w += b.w;
}
inline __device__ float2 operator-(float2 a, float2 b)
{
return make_float2(a.x - b.x, a.y - b.y);
}
inline __device__ float4 operator-(float4 a, float4 b)
{
return make_float4(a.x - b.x, a.y - b.y, a.z - b.z, a.w - b.w);
}
inline __device__ float2 operator-(float2 a, float b)
{
return make_float2(a.x - b, a.y - b);
}
inline __device__ float4 operator-(float4 a, float b)
{
return make_float4(a.x - b, a.y - b, a.z - b, a.w - b);
}
inline __device__ float2 operator*(float2 a, float2 b)
{
return make_float2(a.x * b.x, a.y * b.y);
}
inline __device__ float4 operator*(float4 a, float4 b)
{
return make_float4(a.x * b.x, a.y * b.y, a.z * b.z, a.w * b.w);
}
inline __device__ float2 operator*(float2 a, float b)
{
return make_float2(a.x * b, a.y * b);
}
inline __device__ float4 operator*(float4 a, float b)
{
return make_float4(a.x * b, a.y * b, a.z * b, a.w * b);
}
inline __device__ float2 operator*(float b, float2 a)
{
return make_float2(b * a.x, b * a.y);
}
inline __device__ float4 operator*(float b, float4 a)
{
return make_float4(b * a.x, b * a.y, b * a.z, b * a.w);
}
inline __device__ float2 rsqrtf(float2 x){
return make_float2(rsqrtf(x.x), rsqrtf(x.y));
}
inline __device__ float4 rsqrtf(float4 x){
return make_float4(rsqrtf(x.x), rsqrtf(x.y), rsqrtf(x.z), rsqrtf(x.w));
}
// method to calculate acceleration caused by body J
__device__ void updateAcc(vector bodyAcc[3], float bodyPos[3], // position of body I
vector bufferPosX, vector bufferPosY, vector bufferPosZ, vector bufferMass, // position and mass of body J
float softeningSqr) // to avoid infinities and zero division
{
#if USE_SOA == 0
{
float3 d;
d.x = bufferPosX - bodyPos[0];
d.y = bufferPosY - bodyPos[1];
d.z = bufferPosZ - bodyPos[2];
float distSqr = (d.x * d.x) + (d.y * d.y) + (d.z * d.z) + softeningSqr;
float invDist = rsqrtf(distSqr);
float f = bufferMass * invDist * invDist * invDist;
bodyAcc[0] += d.x * f;
bodyAcc[1] += d.y * f;
bodyAcc[2] += d.z * f;
}
#else // USE_SOA != 0
{
vector distanceX = bufferPosX - bodyPos[0];
vector distanceY = bufferPosY - bodyPos[1];
vector distanceZ = bufferPosZ - bodyPos[2];
vector invDist = rsqrtf(distanceX * distanceX + distanceY * distanceY + distanceZ * distanceZ + softeningSqr);
vector f = bufferMass * invDist * invDist * invDist;
bodyAcc[0] += distanceX * f;
bodyAcc[1] += distanceY * f;
bodyAcc[2] += distanceZ * f;
}
#endif // USE_SOA == 0
}
// method to calculate acceleration caused by body J
__device__ void updateAccGM(vector bodyAcc[3],
float bodyPos[3], // position of body I
float4* oldBodyInfo, // data; [X,Y,Z,mass]
vector* oldPosX,
vector* oldPosY,
vector* oldPosZ,
vector* mass,
int index,
float softeningSqr) // to avoid infinities and zero division
{
#if USE_SOA == 0
{
updateAcc(bodyAcc, bodyPos,
oldBodyInfo[index].x,
oldBodyInfo[index].y,
oldBodyInfo[index].z,
oldBodyInfo[index].w,
softeningSqr);
}
#else // USE_SOA != 0
{
updateAcc(bodyAcc, bodyPos,
oldPosX[index],
oldPosY[index],
oldPosZ[index],
mass[index],
softeningSqr);
}
#endif // USE_SOA == 0
}
// method to load thread specific data from memory
__device__ void loadThreadData(
float4* oldBodyInfo, // data; [X,Y,Z,mass]
float* oldPosX,
float* oldPosY,
float* oldPosZ,
float* mass,
float4* oldVel, // velocity info
float* oldVelX,
float* oldVelY,
float* oldVelZ,
float bodyPos[][3],
float bodyVel[][3],
float* bodyMass) // thread data
{
int index = (blockIdx.x*blockDim.x + threadIdx.x) * OUTER_UNROLL_FACTOR;
#if INNER_UNROLL_FACTOR2 > 0
# pragma unroll INNER_UNROLL_FACTOR2
#endif
for (int j = 0; j < OUTER_UNROLL_FACTOR; ++j) {
#if USE_SOA == 0
{
// store 'thread specific' body info to registers
bodyPos[j][0] = oldBodyInfo[index + j].x;
bodyPos[j][1] = oldBodyInfo[index + j].y;
bodyPos[j][2] = oldBodyInfo[index + j].z;
bodyVel[j][0] = oldVel[index + j].x;
bodyVel[j][1] = oldVel[index + j].y;
bodyVel[j][2] = oldVel[index + j].z;
bodyMass[j] = oldBodyInfo[index + j].w;
}
#else // USE_SOA != 0
{
// store 'thread specific' body info to registers
bodyPos[j][0] = oldPosX[index + j];
bodyPos[j][1] = oldPosY[index + j];
bodyPos[j][2] = oldPosZ[index + j];
bodyVel[j][0] = oldVelX[index + j];
bodyVel[j][1] = oldVelY[index + j];
bodyVel[j][2] = oldVelZ[index + j];
bodyMass[j] = mass[index + j];
}
#endif // USE_SOA == 0
}
}
// method will copy one item (X, Y, Z, mass) from input data to buffers
__device__ void fillBuffers(
float4* oldBodyInfo, // global (input) data; [X,Y,Z,mass]
vector* oldPosX,
vector* oldPosY,
vector* oldPosZ,
vector* mass,
vector bufferPosX[WORK_GROUP_SIZE_X], // buffers
vector bufferPosY[WORK_GROUP_SIZE_X],
vector bufferPosZ[WORK_GROUP_SIZE_X],
vector bufferMass[WORK_GROUP_SIZE_X],
int offset)
{
int tid = threadIdx.x;
#if USE_SOA == 0
{
bufferPosX[tid] = oldBodyInfo[offset + tid].x;
bufferPosY[tid] = oldBodyInfo[offset + tid].y;
bufferPosZ[tid] = oldBodyInfo[offset + tid].z;
bufferMass[tid] = oldBodyInfo[offset + tid].w;
}
#else // USE_SOA != 0
{
bufferPosX[tid] = oldPosX[offset + tid];
bufferPosY[tid] = oldPosY[offset + tid];
bufferPosZ[tid] = oldPosZ[offset + tid];
bufferMass[tid] = mass[offset + tid];
}
#endif // USE_SOA == 0
}
// kernel calculating new position and velocity for n-bodies
#if VECTOR_SIZE > 1
__global__ __attribute__((vec_type_hint(vector)))
#endif
__global__ void nbody_kernel(float timeDelta,
float4* oldBodyInfo, // pos XYZ, mass
vector* oldPosX,
vector* oldPosY,
vector* oldPosZ,
vector* mass,
float4* newBodyInfo,
float4* oldVel, // XYZ, W unused
vector* oldVelX,
vector* oldVelY,
vector* oldVelZ,
float4* newVel, // XYZ, W set to 0.f
float damping,
float softeningSqr,
int n)
{
// buffers for bodies info processed by the work group
__shared__ vector bufferPosX[WORK_GROUP_SIZE_X];
__shared__ vector bufferPosY[WORK_GROUP_SIZE_X];
__shared__ vector bufferPosZ[WORK_GROUP_SIZE_X];
__shared__ vector bufferMass[WORK_GROUP_SIZE_X];
// each thread holds a position/mass of the body it represents
float bodyPos[OUTER_UNROLL_FACTOR][3];
float bodyVel[OUTER_UNROLL_FACTOR][3];
vector bodyAcc[OUTER_UNROLL_FACTOR][3];
float bodyMass[OUTER_UNROLL_FACTOR];
// clear acceleration
#if INNER_UNROLL_FACTOR2 > 0
# pragma unroll INNER_UNROLL_FACTOR2
#endif
for (int j = 0; j < OUTER_UNROLL_FACTOR; ++j) {
#if VECTOR_TYPE == 1
bodyAcc[j][0] = bodyAcc[j][1] = bodyAcc[j][2] = .0f;
#elif VECTOR_TYPE == 2
bodyAcc[j][0] = bodyAcc[j][1] = bodyAcc[j][2] = make_float2(.0f);
#elif VECTOR_TYPE == 4
bodyAcc[j][0] = bodyAcc[j][1] = bodyAcc[j][2] = make_float4(.0f);
#endif
}
// load data
loadThreadData(oldBodyInfo, ( float*)oldPosX, ( float*)oldPosY, ( float*)oldPosZ, ( float*)mass,
oldVel, ( float*)oldVelX, ( float*)oldVelY, ( float*)oldVelZ, // velocity
bodyPos, bodyVel, bodyMass); // values to be filled
int blocks = n / (WORK_GROUP_SIZE_X * VECTOR_TYPE); // each iteration accumulates the effect of WORK_GROUP_SIZE_X buffered bodies on the current thread's bodies
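// NOTE: only whole tiles are processed, so n is effectively assumed to be a multiple of WORK_GROUP_SIZE_X * VECTOR_TYPE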
// start the calculation, process whole blocks
for (int i = 0; i < blocks; i++) {
#if LOCAL_MEM == 1
// load new values to buffer.
// We know that all threads can be used now, so no condition is necessary
fillBuffers(oldBodyInfo, oldPosX, oldPosY, oldPosZ, mass, bufferPosX, bufferPosY, bufferPosZ, bufferMass, i * WORK_GROUP_SIZE_X);
__syncthreads();
#endif // LOCAL_MEM == 1
// calculate the acceleration between the thread body and each other body loaded to buffer
#if INNER_UNROLL_FACTOR1 > 0
# pragma unroll INNER_UNROLL_FACTOR1
#endif
for(int index = 0; index < WORK_GROUP_SIZE_X; index++) {
#if INNER_UNROLL_FACTOR2 > 0
# pragma unroll INNER_UNROLL_FACTOR2
#endif
for (int j = 0; j < OUTER_UNROLL_FACTOR; ++j) {
#if LOCAL_MEM == 1
updateAcc(bodyAcc[j], bodyPos[j],
bufferPosX[index], bufferPosY[index], bufferPosZ[index], bufferMass[index],
softeningSqr);
#else // LOCAL_MEM != 1
updateAccGM(bodyAcc[j], bodyPos[j],
oldBodyInfo, oldPosX, oldPosY, oldPosZ, mass,
i * WORK_GROUP_SIZE_X + index,
softeningSqr);
#endif // LOCAL_MEM == 1
}
}
#if LOCAL_MEM == 1
__syncthreads(); // sync threads
#endif
}
// sum elements of acceleration vector, if any
float resAccX, resAccY, resAccZ;
int index = (blockIdx.x*blockDim.x + threadIdx.x) * OUTER_UNROLL_FACTOR;
#if INNER_UNROLL_FACTOR2 > 0
# pragma unroll INNER_UNROLL_FACTOR2
#endif
for (int j = 0; j < OUTER_UNROLL_FACTOR; ++j) {
resAccX = resAccY = resAccZ = 0.f;
for (int i = 0; i < VECTOR_TYPE; i++)
{
resAccX += ((float*)&bodyAcc[j][0])[i];
resAccY += ((float*)&bodyAcc[j][1])[i];
resAccZ += ((float*)&bodyAcc[j][2])[i];
}
// 'export' result
// calculate resulting position
float resPosX = bodyPos[j][0] + timeDelta * bodyVel[j][0] + damping * timeDelta * timeDelta * resAccX;
float resPosY = bodyPos[j][1] + timeDelta * bodyVel[j][1] + damping * timeDelta * timeDelta * resAccY;
float resPosZ = bodyPos[j][2] + timeDelta * bodyVel[j][2] + damping * timeDelta * timeDelta * resAccZ;
newBodyInfo[index + j] = make_float4(resPosX, resPosY, resPosZ, bodyMass[j]);
// calculate resulting velocity
float resVelX = bodyVel[j][0] + timeDelta * resAccX;
float resVelY = bodyVel[j][1] + timeDelta * resAccY;
float resVelZ = bodyVel[j][2] + timeDelta * resAccZ;
newVel[index + j] = make_float4(resVelX, resVelY, resVelZ, 0.f);
}
}
|
94b41dd807dd0d4274ee555e17eb42576f54bab5.hip | // !!! This is a file automatically generated by hipify!!!
// --- Internal Includes ---
#include "bvh.cuh"
#include "empty.cuh"
#include "aabb_utils.cuh"
// --- Standard Includes ---
#include <thrust/sort.h>
namespace ray_tracer {
RT_DEVICE BVH::BVH(HittableList* hittable_list, hiprandState_t* rand_state,
const float ti, const float tf) :
BVH(hittable_list->objects(), hittable_list->size(), rand_state, ti, tf)
{
}
RT_DEVICE BVH::BVH(Hittable** hittable_objects, const size_t length,
hiprandState_t* rand_state, float ti, float tf) :
hittable_objects_(hittable_objects)
{
if (length == 1)
{
left_ = hittable_objects_[0];
right_ = new Empty();
}
else if (length == 2)
{
left_ = hittable_objects_[0];
right_ = hittable_objects_[1];
}
else
{
// Copy over the pointers
const auto objects = new Hittable * [length];
for (size_t index = 0; index < length; ++index)
{
objects[index] = hittable_objects_[index];
}
// We sort along a random direction, not the best but should work fine for now
const auto axis = static_cast<size_t>(uniform_rand(rand_state, 0, 3)) % 3;
thrust::sort(objects, objects + length,
[axis, ti, tf] RT_DEVICE(Hittable * a, Hittable * b) -> bool
{
return box_compare(a, b, axis, ti, tf);
}
);
const auto mid = length / 2;
left_ = new BVH(objects, mid, rand_state, ti, tf);
right_ = new BVH(objects + mid, length - mid, rand_state, ti, tf);
delete[] objects;
}
AABB box_left;
AABB box_right;
const auto left_has_bb = left_->bounding_box(ti, tf, box_left);
const auto right_has_bb = right_->bounding_box(ti, tf, box_right);
if (left_has_bb && right_has_bb)
{
box_ = enclosing_box(box_left, box_right);
}
else if(left_has_bb)
{
box_ = box_left;
}
else if(right_has_bb)
{
box_ = box_right;
}
else
{
printf("WARNING: Bounding box is not present for both the objects, this should never happen");
has_bounding_box_ = false;
}
}
RT_DEVICE BVH::~BVH()
{
delete left_;
delete right_;
delete[] hittable_objects_;
}
RT_DEVICE bool BVH::hit(const Ray& ray, const float t_min, const float t_max, HitRecord& rec) const
{
if (has_bounding_box_)
{
if (!box_.hit(ray, t_min, t_max))
{
return false;
}
}
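// Test both children; when the left child hits, the right child only updates the record if it is closer than rec.t.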
const auto left_hit = left_->hit(ray, t_min, t_max, rec);
const auto right_hit = right_->hit(ray, t_min, left_hit ? rec.t : t_max, rec);
return left_hit || right_hit;
}
RT_DEVICE bool BVH::bounding_box(const float ti, const float tf, AABB& box_out) const
{
box_out = box_;
return true;
}
} // namespace ray_tracer
| 94b41dd807dd0d4274ee555e17eb42576f54bab5.cu | // --- Internal Includes ---
#include "bvh.cuh"
#include "empty.cuh"
#include "aabb_utils.cuh"
// --- Standard Includes ---
#include <thrust/sort.h>
namespace ray_tracer {
RT_DEVICE BVH::BVH(HittableList* hittable_list, curandState_t* rand_state,
const float ti, const float tf) :
BVH(hittable_list->objects(), hittable_list->size(), rand_state, ti, tf)
{
}
RT_DEVICE BVH::BVH(Hittable** hittable_objects, const size_t length,
curandState_t* rand_state, float ti, float tf) :
hittable_objects_(hittable_objects)
{
if (length == 1)
{
left_ = hittable_objects_[0];
right_ = new Empty();
}
else if (length == 2)
{
left_ = hittable_objects_[0];
right_ = hittable_objects_[1];
}
else
{
// Copy over the pointers
const auto objects = new Hittable * [length];
for (size_t index = 0; index < length; ++index)
{
objects[index] = hittable_objects_[index];
}
// We sort along a random direction, not the best but should work fine for now
const auto axis = static_cast<size_t>(uniform_rand(rand_state, 0, 3)) % 3;
thrust::sort(objects, objects + length,
[axis, ti, tf] RT_DEVICE(Hittable * a, Hittable * b) -> bool
{
return box_compare(a, b, axis, ti, tf);
}
);
const auto mid = length / 2;
left_ = new BVH(objects, mid, rand_state, ti, tf);
right_ = new BVH(objects + mid, length - mid, rand_state, ti, tf);
delete[] objects;
}
AABB box_left;
AABB box_right;
const auto left_has_bb = left_->bounding_box(ti, tf, box_left);
const auto right_has_bb = right_->bounding_box(ti, tf, box_right);
if (left_has_bb && right_has_bb)
{
box_ = enclosing_box(box_left, box_right);
}
else if(left_has_bb)
{
box_ = box_left;
}
else if(right_has_bb)
{
box_ = box_right;
}
else
{
printf("WARNING: Bounding box is not present for both the objects, this should never happen");
has_bounding_box_ = false;
}
}
RT_DEVICE BVH::~BVH()
{
delete left_;
delete right_;
delete[] hittable_objects_;
}
RT_DEVICE bool BVH::hit(const Ray& ray, const float t_min, const float t_max, HitRecord& rec) const
{
if (has_bounding_box_)
{
if (!box_.hit(ray, t_min, t_max))
{
return false;
}
}
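// Test both children; when the left child hits, the right child only updates the record if it is closer than rec.t.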
const auto left_hit = left_->hit(ray, t_min, t_max, rec);
const auto right_hit = right_->hit(ray, t_min, left_hit ? rec.t : t_max, rec);
return left_hit || right_hit;
}
RT_DEVICE bool BVH::bounding_box(const float ti, const float tf, AABB& box_out) const
{
box_out = box_;
return true;
}
} // namespace ray_tracer
|
859761197ad3bb57962188646867b1d8106b5d21.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <math.h>
#include <stdio.h>
#include <pycuda-complex.hpp>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
extern "C" {
// ##########################################
// Global variables
#define pi 3.14159265359
#define MPion 139.57018
#define MKaon 493.667
#define MKst_1_1410 1414.
#define GKst_1_1410 232.
#define MKst_1_1680 1717.
#define GKst_1_1680 322.
#define MBs 5366.77
__device__ double DCP_prod;
__device__ double MEta = 547.;
__device__ double sAdler_Stheo = 0.236;
__device__ double B0_Stheo = 0.411;
__device__ double B1_Stheo = 0.162;
__device__ double alpha_Stheo = 1.15;
__device__ double s0_Stheo = 1.21;
__device__ double phi0_Stheo = -0.19;
__device__ double phi1_Stheo = 5.03;
__device__ double a_Stheo = -5.2;
__device__ double b_Stheo = 7.6;
__device__ double c_Stheo = -1.73;
__device__ double sqrtsr1_Stheo = 1.401;
__device__ double sqrtsr2_Stheo = 1.817;
__device__ double e1_Stheo = 1.;
__device__ double e2_Stheo = 0.184;
__device__ double G1_Stheo = 0.497;
__device__ double G2_Stheo = 0.28;
__device__ double s_Kpi_palano = (139.57018*139.57018+493.667*493.667)/1000./1000.;
__device__ double s_A_palano = 0.87753*(139.57018*139.57018+493.667*493.667)/1000./1000.;
__device__ double s_a_palano = 1.7991;
__device__ double g_1_a_palano = 0.3139;
__device__ double g_2_a_palano = -0.00775;
__device__ double s_b_palano = 8.3627;
__device__ double g_1_b_palano = 1.1804;
__device__ double g_2_b_palano = -0.22335;
__device__ double C_11_0_palano = -0.1553;
__device__ double C_11_1_palano = 0.0909;
__device__ double C_11_2_palano = 0.8618;
__device__ double C_11_3_palano = 0.0629;
__device__ double C_12_0_palano = 0.0738;
__device__ double C_12_1_palano = 0.3866;
__device__ double C_12_2_palano = 1.2195;
__device__ double C_12_3_palano = 0.8390;
__device__ double C_22_0_palano = -0.0036;
__device__ double C_22_1_palano = 0.2590;
__device__ double C_22_2_palano = 1.6950;
__device__ double C_22_3_palano = 2.2300;
__device__ double A_1_0_palano = 1.;
__device__ double A_1_1_palano = 0.00491636810678;
__device__ double A_1_2_palano = 2.12489529189;
__device__ double A_1_3_palano = 0.56004179484;
__device__ double A_1_4_palano = 0.;
__device__ double A_2_0_palano = -4.20943829183;
__device__ double A_2_1_palano = -1.2110147687;
__device__ double A_2_2_palano = 2.28474898994;
__device__ double A_2_3_palano = 5.93332582489;
__device__ double A_2_4_palano = 0.;
// ##########################################
// Auxiliary elements
__device__ double fjjphhpindexdict[3][3][5][5];
__device__ double ghhpindexdict[5][5];
__device__ double reNj1j2hdict[3][3][5];
__device__ double imNj1j2hdict[3][3][5];
__device__ double spl_knot_vector[2][2][6];
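// Angular normalisation constants N_{j1j2}^h, stored as separate real and imaginary lookup tables.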
__device__ pycuda::complex<double> Nj1j2hdict(int j1, int j2, int h) {
return pycuda::complex<double>(reNj1j2hdict[j1][j2][h],imNj1j2hdict[j1][j2][h]);
}
// ##########################################
// Initializer
__global__ void initialize() {
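// Fill the lookup tables mapping (j,h) combinations to angular-function indices, and the N_{j1j2}^h normalisation constants.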
fjjphhpindexdict[0][0][0][0] = 1;
fjjphhpindexdict[0][1][0][0] = 2;
fjjphhpindexdict[0][1][0][1] = 3;
fjjphhpindexdict[0][1][0][2] = 3;
fjjphhpindexdict[0][2][0][0] = 4;
fjjphhpindexdict[0][2][0][1] = 5;
fjjphhpindexdict[0][2][0][2] = 5;
fjjphhpindexdict[1][0][0][0] = 2;
fjjphhpindexdict[1][1][0][0] = 6;
fjjphhpindexdict[1][1][0][1] = 5;
fjjphhpindexdict[1][1][0][2] = 5;
fjjphhpindexdict[1][2][0][0] = 7;
fjjphhpindexdict[1][2][0][1] = 8;
fjjphhpindexdict[1][2][0][2] = 8;
fjjphhpindexdict[1][0][1][0] = 3;
fjjphhpindexdict[1][0][2][0] = 3;
fjjphhpindexdict[1][1][1][0] = 5;
fjjphhpindexdict[1][1][2][0] = 5;
fjjphhpindexdict[1][1][1][1] = 9;
fjjphhpindexdict[1][1][1][2] = 9;
fjjphhpindexdict[1][1][2][1] = 9;
fjjphhpindexdict[1][1][2][2] = 9;
fjjphhpindexdict[1][2][1][0] = 10;
fjjphhpindexdict[1][2][2][0] = 10;
fjjphhpindexdict[1][2][1][1] = 11;
fjjphhpindexdict[1][2][1][2] = 11;
fjjphhpindexdict[1][2][2][1] = 11;
fjjphhpindexdict[1][2][2][2] = 11;
fjjphhpindexdict[2][0][0][0] = 4;
fjjphhpindexdict[2][1][0][0] = 7;
fjjphhpindexdict[2][1][0][1] = 10;
fjjphhpindexdict[2][1][0][2] = 10;
fjjphhpindexdict[2][2][0][0] = 12;
fjjphhpindexdict[2][2][0][1] = 13;
fjjphhpindexdict[2][2][0][2] = 13;
fjjphhpindexdict[2][0][1][0] = 5;
fjjphhpindexdict[2][0][2][0] = 5;
fjjphhpindexdict[2][1][1][0] = 8;
fjjphhpindexdict[2][1][2][0] = 8;
fjjphhpindexdict[2][1][1][1] = 11;
fjjphhpindexdict[2][1][1][2] = 11;
fjjphhpindexdict[2][1][2][1] = 11;
fjjphhpindexdict[2][1][2][2] = 11;
fjjphhpindexdict[2][2][1][0] = 13;
fjjphhpindexdict[2][2][2][0] = 13;
fjjphhpindexdict[2][2][1][1] = 14;
fjjphhpindexdict[2][2][1][2] = 14;
fjjphhpindexdict[2][2][2][1] = 14;
fjjphhpindexdict[2][2][2][2] = 14;
fjjphhpindexdict[0][2][0][3] = 9;
fjjphhpindexdict[0][2][0][4] = 9;
fjjphhpindexdict[1][2][0][3] = 11;
fjjphhpindexdict[1][2][0][4] = 11;
fjjphhpindexdict[1][2][1][3] = 15;
fjjphhpindexdict[1][2][1][4] = 15;
fjjphhpindexdict[1][2][2][3] = 15;
fjjphhpindexdict[1][2][2][4] = 15;
fjjphhpindexdict[2][2][0][3] = 16;
fjjphhpindexdict[2][2][0][4] = 16;
fjjphhpindexdict[2][2][1][3] = 17;
fjjphhpindexdict[2][2][1][4] = 17;
fjjphhpindexdict[2][2][2][3] = 17;
fjjphhpindexdict[2][2][2][4] = 17;
fjjphhpindexdict[2][0][3][0] = 9;
fjjphhpindexdict[2][0][4][0] = 9;
fjjphhpindexdict[2][1][3][0] = 11;
fjjphhpindexdict[2][1][4][0] = 11;
fjjphhpindexdict[2][1][3][1] = 15;
fjjphhpindexdict[2][1][3][2] = 15;
fjjphhpindexdict[2][1][4][1] = 15;
fjjphhpindexdict[2][1][4][2] = 15;
fjjphhpindexdict[2][2][3][0] = 16;
fjjphhpindexdict[2][2][4][0] = 16;
fjjphhpindexdict[2][2][3][1] = 17;
fjjphhpindexdict[2][2][3][2] = 17;
fjjphhpindexdict[2][2][4][1] = 17;
fjjphhpindexdict[2][2][4][2] = 17;
fjjphhpindexdict[2][2][3][3] = 18;
fjjphhpindexdict[2][2][3][4] = 18;
fjjphhpindexdict[2][2][4][3] = 18;
fjjphhpindexdict[2][2][4][4] = 18;
ghhpindexdict[0][0] = 1;
ghhpindexdict[0][1] = 2;
ghhpindexdict[0][2] = 3;
ghhpindexdict[1][0] = 2;
ghhpindexdict[1][1] = 4;
ghhpindexdict[1][2] = 5;
ghhpindexdict[2][0] = 3;
ghhpindexdict[2][1] = 5;
ghhpindexdict[2][2] = 6;
ghhpindexdict[0][3] = 7;
ghhpindexdict[0][4] = 8;
ghhpindexdict[1][3] = 9;
ghhpindexdict[1][4] = 10;
ghhpindexdict[2][3] = 11;
ghhpindexdict[2][4] = 12;
ghhpindexdict[3][0] = 7;
ghhpindexdict[3][1] = 9;
ghhpindexdict[3][2] = 11;
ghhpindexdict[3][3] = 13;
ghhpindexdict[3][4] = 14;
ghhpindexdict[4][0] = 8;
ghhpindexdict[4][1] = 10;
ghhpindexdict[4][2] = 12;
ghhpindexdict[4][3] = 14;
ghhpindexdict[4][4] = 15;
reNj1j2hdict[0][0][0] = pycuda::real(pycuda::complex<double>(1./(2.*sqrt(2.*pi)),0.));
reNj1j2hdict[0][1][0] = pycuda::real(pycuda::complex<double>(-sqrt(3.)/(2.*sqrt(2.*pi)),0.));
reNj1j2hdict[0][2][0] = pycuda::real(pycuda::complex<double>(sqrt(5.)/(4.*sqrt(2.*pi)),0.));
reNj1j2hdict[1][0][0] = pycuda::real(pycuda::complex<double>(sqrt(3.)/(2.*sqrt(2.*pi)),0.));
reNj1j2hdict[1][1][0] = pycuda::real(pycuda::complex<double>(-3./(2.*sqrt(2.*pi)),0.));
reNj1j2hdict[1][1][1] = pycuda::real(pycuda::complex<double>(-3./(4.*sqrt(pi)),0.));
reNj1j2hdict[1][1][2] = pycuda::real(pycuda::complex<double>(0.,-3./(4.*sqrt(pi))));
reNj1j2hdict[1][2][0] = pycuda::real(pycuda::complex<double>(sqrt(15.)/(4.*sqrt(2.*pi)),0.));
reNj1j2hdict[1][2][1] = pycuda::real(pycuda::complex<double>(3.*sqrt(5.)/(4.*sqrt(pi)),0.));
reNj1j2hdict[1][2][2] = pycuda::real(pycuda::complex<double>(0.,3.*sqrt(5.)/(4.*sqrt(pi))));
reNj1j2hdict[2][0][0] = pycuda::real(pycuda::complex<double>(sqrt(5.)/(4.*sqrt(2.*pi)),0.));
reNj1j2hdict[2][1][0] = pycuda::real(pycuda::complex<double>(-sqrt(15.)/(4.*sqrt(2.*pi)),0.));
reNj1j2hdict[2][1][1] = pycuda::real(pycuda::complex<double>(-3.*sqrt(5.)/(4.*sqrt(pi)),0.));
reNj1j2hdict[2][1][2] = pycuda::real(pycuda::complex<double>(0.,-3.*sqrt(5.)/(4.*sqrt(pi))));
reNj1j2hdict[2][2][0] = pycuda::real(pycuda::complex<double>(5./(8.*sqrt(2.*pi)),0.));
reNj1j2hdict[2][2][1] = pycuda::real(pycuda::complex<double>(15./(4.*sqrt(pi)),0.));
reNj1j2hdict[2][2][2] = pycuda::real(pycuda::complex<double>(0.,15./(4.*sqrt(pi))));
reNj1j2hdict[2][2][3] = pycuda::real(pycuda::complex<double>(15./(16.*sqrt(pi)),0.));
reNj1j2hdict[2][2][4] = pycuda::real(pycuda::complex<double>(0.,15./(16.*sqrt(pi))));
imNj1j2hdict[0][0][0] = pycuda::imag(pycuda::complex<double>(1./(2.*sqrt(2.*pi)),0.));
imNj1j2hdict[0][1][0] = pycuda::imag(pycuda::complex<double>(-sqrt(3.)/(2.*sqrt(2.*pi)),0.));
imNj1j2hdict[0][2][0] = pycuda::imag(pycuda::complex<double>(sqrt(5.)/(4.*sqrt(2.*pi)),0.));
imNj1j2hdict[1][0][0] = pycuda::imag(pycuda::complex<double>(sqrt(3.)/(2.*sqrt(2.*pi)),0.));
imNj1j2hdict[1][1][0] = pycuda::imag(pycuda::complex<double>(-3./(2.*sqrt(2.*pi)),0.));
imNj1j2hdict[1][1][1] = pycuda::imag(pycuda::complex<double>(-3./(4.*sqrt(pi)),0.));
imNj1j2hdict[1][1][2] = pycuda::imag(pycuda::complex<double>(0.,-3./(4.*sqrt(pi))));
imNj1j2hdict[1][2][0] = pycuda::imag(pycuda::complex<double>(sqrt(15.)/(4.*sqrt(2.*pi)),0.));
imNj1j2hdict[1][2][1] = pycuda::imag(pycuda::complex<double>(3.*sqrt(5.)/(4.*sqrt(pi)),0.));
imNj1j2hdict[1][2][2] = pycuda::imag(pycuda::complex<double>(0.,3.*sqrt(5.)/(4.*sqrt(pi))));
imNj1j2hdict[2][0][0] = pycuda::imag(pycuda::complex<double>(sqrt(5.)/(4.*sqrt(2.*pi)),0.));
imNj1j2hdict[2][1][0] = pycuda::imag(pycuda::complex<double>(-sqrt(15.)/(4.*sqrt(2.*pi)),0.));
imNj1j2hdict[2][1][1] = pycuda::imag(pycuda::complex<double>(-3.*sqrt(5.)/(4.*sqrt(pi)),0.));
imNj1j2hdict[2][1][2] = pycuda::imag(pycuda::complex<double>(0.,-3.*sqrt(5.)/(4.*sqrt(pi))));
imNj1j2hdict[2][2][0] = pycuda::imag(pycuda::complex<double>(5./(8.*sqrt(2.*pi)),0.));
imNj1j2hdict[2][2][1] = pycuda::imag(pycuda::complex<double>(15./(4.*sqrt(pi)),0.));
imNj1j2hdict[2][2][2] = pycuda::imag(pycuda::complex<double>(0.,15./(4.*sqrt(pi))));
imNj1j2hdict[2][2][3] = pycuda::imag(pycuda::complex<double>(15./(16.*sqrt(pi)),0.));
imNj1j2hdict[2][2][4] = pycuda::imag(pycuda::complex<double>(0.,15./(16.*sqrt(pi))));
}
// ##########################################
// Normalisation weights
__device__ int indexdictcpp[4050] = {0,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,2,
3,-1,-1,-1,-1,-1,-1,-1,-1,4,5,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,6,7,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,8,9,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,10,
11,-1,-1,-1,-1,-1,-1,-1,-1,12,13,-1,-1,-1,-1,-1,-1,-1,-1,14,15,-1,-1,-1,-1,-1,-1,-1,-1,16,17,-1,-1,-1,-1,-1,-1,-1,-1,18,19,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,20,21,-1,-1,-1,-1,-1,-1,-1,-1,22,23,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,24,
25,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,26,27,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,28,
29,-1,-1,-1,-1,-1,-1,-1,-1,30,31,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,32,33,-1,-1,-1,-1,-1,-1,-1,-1,34,35,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,36,37,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,38,39,-1,-1,-1,-1,-1,-1,-1,-1,40,
41,-1,-1,-1,-1,-1,-1,-1,-1,42,43,-1,-1,-1,-1,-1,-1,-1,-1,44,45,-1,-1,-1,-1,-1,-1,-1,-1,46,47,48,49,-1,-1,-1,-1,-1,-1,50,51,-1,-1,-1,-1,-1,-1,-1,-1,52,
53,-1,-1,-1,-1,-1,-1,-1,-1,54,55,-1,-1,-1,-1,-1,-1,-1,-1,56,57,-1,-1,-1,-1,-1,-1,-1,-1,58,59,-1,-1,-1,-1,-1,-1,-1,-1,60,61,-1,-1,-1,-1,-1,-1,-1,-1,62,
63,-1,-1,-1,-1,-1,-1,-1,-1,64,65,-1,-1,-1,-1,-1,-1,-1,-1,66,67,68,69,70,71,-1,-1,-1,-1,72,73,74,75,-1,-1,-1,-1,-1,-1,76,77,-1,-1,-1,-1,-1,-1,-1,-1,78,
79,80,81,-1,-1,-1,-1,-1,-1,82,83,84,85,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,86,
87,-1,-1,-1,-1,-1,-1,-1,-1,88,89,-1,-1,-1,-1,-1,-1,-1,-1,90,91,-1,-1,-1,-1,-1,-1,-1,-1,92,93,-1,-1,-1,-1,-1,-1,-1,-1,94,95,-1,-1,-1,-1,-1,-1,-1,-1,96,
97,-1,-1,-1,-1,-1,-1,-1,-1,98,99,-1,-1,-1,-1,-1,-1,-1,-1,100,101,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,102,103,-1,-1,-1,-1,-1,-1,-1,-1,104,
105,-1,-1,-1,-1,-1,-1,-1,-1,106,107,-1,-1,-1,-1,-1,-1,-1,-1,108,109,-1,-1,-1,-1,-1,-1,-1,-1,110,111,112,113,-1,-1,-1,-1,-1,-1,114,115,116,117,-1,-1,-1,-1,-1,-1,118,
119,-1,-1,-1,-1,-1,-1,-1,-1,120,121,122,123,-1,-1,-1,-1,-1,-1,124,125,-1,-1,-1,-1,-1,-1,-1,-1,126,127,-1,-1,-1,-1,-1,-1,-1,-1,128,129,-1,-1,-1,-1,-1,-1,-1,-1,130,
131,-1,-1,-1,-1,-1,-1,-1,-1,132,133,-1,-1,-1,-1,-1,-1,-1,-1,134,135,136,137,138,139,-1,-1,-1,-1,140,141,142,143,144,145,-1,-1,-1,-1,146,147,-1,-1,-1,-1,-1,-1,-1,-1,148,
149,150,151,152,153,-1,-1,-1,-1,154,155,156,157,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,158,
159,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,160,161,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,162,163,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,164,
165,-1,-1,-1,-1,-1,-1,-1,-1,166,167,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,168,169,-1,-1,-1,-1,-1,-1,-1,-1,170,171,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,172,173,-1,-1,-1,-1,-1,-1,-1,-1,174,175,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,176,177,-1,-1,-1,-1,-1,-1,-1,-1,178,
179,-1,-1,-1,-1,-1,-1,-1,-1,180,181,-1,-1,-1,-1,-1,-1,-1,-1,182,183,-1,-1,-1,-1,-1,-1,-1,-1,184,185,186,187,-1,-1,-1,-1,-1,-1,188,189,-1,-1,-1,-1,-1,-1,-1,-1,190,
191,-1,-1,-1,-1,-1,-1,-1,-1,192,193,194,195,-1,-1,-1,-1,-1,-1,196,197,-1,-1,-1,-1,-1,-1,-1,-1,198,199,-1,-1,-1,-1,-1,-1,-1,-1,200,201,-1,-1,-1,-1,-1,-1,-1,-1,202,
203,-1,-1,-1,-1,-1,-1,-1,-1,204,205,-1,-1,-1,-1,-1,-1,-1,-1,206,207,208,209,210,211,-1,-1,-1,-1,212,213,214,215,-1,-1,-1,-1,-1,-1,216,217,-1,-1,-1,-1,-1,-1,-1,-1,218,
219,220,221,222,223,-1,-1,-1,-1,224,225,226,227,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,228,
229,-1,-1,-1,-1,-1,-1,-1,-1,230,231,-1,-1,-1,-1,-1,-1,-1,-1,232,233,-1,-1,-1,-1,-1,-1,-1,-1,234,235,-1,-1,-1,-1,-1,-1,-1,-1,236,237,-1,-1,-1,-1,-1,-1,-1,-1,238,
239,-1,-1,-1,-1,-1,-1,-1,-1,240,241,-1,-1,-1,-1,-1,-1,-1,-1,242,243,-1,-1,-1,-1,-1,-1,-1,-1,244,245,-1,-1,-1,-1,-1,-1,-1,-1,246,247,-1,-1,-1,-1,-1,-1,-1,-1,248,
249,-1,-1,-1,-1,-1,-1,-1,-1,250,251,-1,-1,-1,-1,-1,-1,-1,-1,252,253,-1,-1,-1,-1,-1,-1,-1,-1,254,255,256,257,-1,-1,-1,-1,-1,-1,258,259,260,261,-1,-1,-1,-1,-1,-1,262,
263,-1,-1,-1,-1,-1,-1,-1,-1,264,265,266,267,-1,-1,-1,-1,-1,-1,268,269,270,271,-1,-1,-1,-1,-1,-1,272,273,-1,-1,-1,-1,-1,-1,-1,-1,274,275,-1,-1,-1,-1,-1,-1,-1,-1,276,
277,-1,-1,-1,-1,-1,-1,-1,-1,278,279,-1,-1,-1,-1,-1,-1,-1,-1,280,281,282,283,284,285,-1,-1,-1,-1,286,287,288,289,290,291,-1,-1,-1,-1,292,293,-1,-1,-1,-1,-1,-1,-1,-1,294,
295,296,297,298,299,-1,-1,-1,-1,300,301,302,303,304,305,-1,-1,-1,-1,306,307,-1,-1,-1,-1,-1,-1,-1,-1,308,309,-1,-1,-1,-1,-1,-1,-1,-1,310,311,-1,-1,-1,-1,-1,-1,-1,-1,312,
313,-1,-1,-1,-1,-1,-1,-1,-1,314,315,316,317,318,319,-1,-1,-1,-1,320,321,322,323,324,325,-1,-1,-1,-1,326,327,-1,-1,-1,-1,-1,-1,-1,-1,328,329,330,331,332,333,-1,-1,-1,-1,334,
335,336,337,338,339,340,341,-1,-1,342,343,-1,-1,-1,-1,-1,-1,-1,-1,344,345,-1,-1,-1,-1,-1,-1,-1,-1,346,347,-1,-1,-1,-1,-1,-1,-1,-1,348,349,-1,-1,-1,-1,-1,-1,-1,-1,350,
351,352,353,354,355,-1,-1,-1,-1,356,357,358,359,360,361,-1,-1,-1,-1,362,363,-1,-1,-1,-1,-1,-1,-1,-1,364,365,366,367,368,369,-1,-1,-1,-1,370,371,372,373,374,375,376,377,378,379};
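// Map the indices (j1,j2,h,j1p,j2p,hp,part) to a position in the reduced table of normalisation weights; entries of -1 mark unused combinations.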
__device__ int redindexcpp(int j1, int j2, int h, int j1p, int j2p, int hp, int part) {
return indexdictcpp[part+2*hp+10*j2p+30*j1p+90*h+450*j2+1350*j1];
}
__device__ double nw_comp_matrix[2][2][380];
__device__ double nw_comp(int year_opt,int trig_opt,int j1,int j2,int h,int j1p,int j2p,int hp,int part) {
return nw_comp_matrix[year_opt][trig_opt][redindexcpp(j1,j2,h,j1p,j2p,hp,part)];
}
// ##########################################
// Splines
__device__ double knots_wide[6] = {0.0,0.9,1.4,2.1,3.1,12.0};
__device__ double knots_narrow[6] = {0.0,0.9,1.4,2.0,3.1,12.0};
__device__ double a_2011_L0TIS_wide[5][4] = {{0.0,-0.00112621527248,1.58569721831,-0.819679115309},
{-0.695240812888,2.31634316102,-0.989268755348,0.134011986046},
{-0.968319206849,2.90151114808,-1.40724588896,0.233530351192},
{1.49093519128,-0.611709420676,0.265716286636,-0.0320192004899},
{0.480295052314,0.366329423485,-0.0497801147062,0.00190514374049}};
__device__ double a_2012_L0TIS_wide[5][4] = {{0.0,-0.00108246180814,2.40916212056,-1.38444396395},
{-1.71192960187,5.70534954441,-3.93131788635,0.963881964535},
{1.5097616167,-1.19827449537,0.999842142073,-0.210203756517},
{-1.60039623414,3.24480814869,-1.11591149796,0.125630154599},
{2.3184526579,-0.547626262968,0.107454441287,-0.00591457005072}};
__device__ double a_2011_L0noTIS_wide[5][4] = {{0.0,-0.00098639638413,2.21305126523,-1.24975680956},
{-1.38905016398,4.62918081687,-2.93157897172,0.655661796718},
{0.301283465128,1.00703732593,-0.344333621046,0.0396509989394},
{0.280998193193,1.03601628584,-0.358133125764,0.0418413965137},
{1.65203424402,-0.290792795608,0.0698698037346,-0.00418042386257}};
__device__ double a_2012_L0noTIS_wide[5][4] = {{0.0,-0.00143959572436,4.23974268964,-2.51791074478},
{-3.18832348534,10.6263053554,-7.56886281162,1.85564684828},
{2.64645621312,-1.87679399844,1.36192244113,-0.270730592851},
{-1.04557535896,3.39753681882,-1.14966366233,0.127933868015},
{2.9370024549,-0.456570742976,0.0935968414771,-0.00575005712493}};
__device__ double a_2011_L0TIS_narrow[5][4] = {{0.0,-0.00105007049752,2.10287696037,-1.21206867385},
{-1.42394206761,4.74542348821,-3.17098254931,0.741212626035},
{0.646630522127,0.308482224484,-0.00173878950165,-0.0133692215384},
{0.350645013584,0.752460487299,-0.223727920909,0.0236289670295},
{1.06879626479,0.0574754054844,0.000460815159922,-0.000477348676871}};
__device__ double a_2012_L0TIS_narrow[5][4] = {{0.0,-0.0217677276861,1.92932115124,-1.02522029051},
{-0.924217061529,3.05895581074,-1.49370500257,0.242567173866},
{-1.04708252048,3.32223893706,-1.68176437851,0.287343215756},
{1.38760726219,-0.329795736933,0.144252958489,-0.0169930070766},
{0.873682232338,0.167551066145,-0.0161814941165,0.000258009332581}};
__device__ double a_2011_L0noTIS_narrow[5][4] = {{0.0,-0.00141362744144,7.02846926232,-4.03198237274},
{-4.36857014682,14.5604868619,-9.15142017033,1.96056926898},
{-0.84323191785,7.00619065702,-3.75549430966,0.67582501644},
{5.39200898979,-2.34667070444,0.920936371063,-0.103580097015},
{2.42800757735,0.521717759207,-0.00435023011253,-0.00408691409238}};
__device__ double a_2012_L0noTIS_narrow[5][4] = {{0.0,-0.0135116503737,3.32589922088,-2.04848218086},
{-2.87136660569,9.55771036858,-7.3087919113,1.89029231253},
{3.9544829436,-5.06911009418,3.13893699067,-0.597262187936},
{-1.99801374513,3.85963493892,-1.32543552587,0.146799898155},
{2.48668454014,-0.480395659729,0.0745743446588,-0.00373879760085}};
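// Decay-time acceptance: cubic-spline knots and per-bin polynomial coefficients, selected by data-taking year, trigger category and mass window.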
__device__ double spline_knot(int wide_window,int i) {
if (wide_window == 0) {return knots_narrow[i];}
else {return knots_wide[i];}
}
__device__ double spline_coef(int year_opt,int trig_opt,int wide_window,int ibin,int deg) {
if (year_opt == 0) {
if (trig_opt == 0) {
if (wide_window == 0) {return a_2011_L0TIS_narrow[ibin][deg];}
else {return a_2011_L0TIS_wide[ibin][deg];}
}
else {
if (wide_window == 0) {return a_2011_L0noTIS_narrow[ibin][deg];}
else {return a_2011_L0noTIS_wide[ibin][deg];}
}
}
else {
if (trig_opt == 0) {
if (wide_window == 0) {return a_2012_L0TIS_narrow[ibin][deg];}
else {return a_2012_L0TIS_wide[ibin][deg];}
}
else {
if (wide_window == 0) {return a_2012_L0noTIS_narrow[ibin][deg];}
else {return a_2012_L0noTIS_wide[ibin][deg];}
}
}
}
// ##########################################
// Buffer variables
__device__ const int max_N_events = 100000;
__device__ int wide_window;
__device__ int year_opt[4];
__device__ int trig_opt[4];
__device__ double alt_fit;
__device__ double option;
__device__ double inftres;
__device__ double acctype;
__device__ double A_j1;
__device__ double A_j2;
__device__ double A_h;
__device__ double A_j1p;
__device__ double A_j2p;
__device__ double A_hp;
__device__ double qcomp;
__device__ int decision_SSK[4][max_N_events];
__device__ int decision_OS[4][max_N_events];
__device__ double etamistag_SSK[4][max_N_events];
__device__ double etamistag_OS[4][max_N_events];
__device__ double m1[4][max_N_events];
__device__ double m2[4][max_N_events];
__device__ double cos1[4][max_N_events];
__device__ double cos2[4][max_N_events];
__device__ double phi[4][max_N_events];
__device__ double t[4][max_N_events];
__device__ double t_err[4][max_N_events];
__device__ double max_fun[max_N_events];
__device__ double fun_ran[max_N_events];
__device__ double dec_accepted[max_N_events];
__device__ double m1_MCrew[max_N_events];
__device__ double m2_MCrew[max_N_events];
__device__ double cos1_MCrew[max_N_events];
__device__ double cos2_MCrew[max_N_events];
__device__ double phi_MCrew[max_N_events];
__device__ double IT_cosh_MCrew[max_N_events];
__device__ double IT_sinh_MCrew[max_N_events];
__device__ double IT_cos_MCrew[max_N_events];
__device__ double IT_sin_MCrew[max_N_events];
__device__ double fi_cos1_MCrew[18][max_N_events];
__device__ double fi_cos2_MCrew[18][max_N_events];
__device__ double gi_MCrew[15][max_N_events];
__device__ double reMj1j2_MCrew[3][3][max_N_events];
__device__ double imMj1j2_MCrew[3][3][max_N_events];
__device__ pycuda::complex<double> Mj1j2_MCrew(int j1, int j2, int iev) {
return pycuda::complex<double>(reMj1j2_MCrew[j1][j2][iev],imMj1j2_MCrew[j1][j2][iev]);
}
__device__ double phasespace_MCrew[max_N_events];
__device__ double reA00;
__device__ double reA01;
__device__ double reA10;
__device__ double reA02;
__device__ double reA20;
__device__ double reA110;
__device__ double reA11par;
__device__ double reA11perp;
__device__ double reA120;
__device__ double reA12par;
__device__ double reA12perp;
__device__ double reA210;
__device__ double reA21par;
__device__ double reA21perp;
__device__ double reA220;
__device__ double reA22par;
__device__ double reA22perp;
__device__ double reA22par2;
__device__ double reA22perp2;
__device__ double DCP_SS;
__device__ double DCP_SV;
__device__ double DCP_VS;
__device__ double DCP_ST;
__device__ double DCP_TS;
__device__ double DCP;
__device__ double DCP_VT;
__device__ double DCP_TV;
__device__ double DCP_TT;
__device__ double imA00;
__device__ double imA01;
__device__ double imA10;
__device__ double imA02;
__device__ double imA20;
__device__ double imA110;
__device__ double imA11par;
__device__ double imA11perp;
__device__ double imA120;
__device__ double imA12par;
__device__ double imA12perp;
__device__ double imA210;
__device__ double imA21par;
__device__ double imA21perp;
__device__ double imA220;
__device__ double imA22par;
__device__ double imA22perp;
__device__ double imA22par2;
__device__ double imA22perp2;
__device__ double phis;
__device__ double dphi_SS;
__device__ double dphi_SV;
__device__ double dphi_VS;
__device__ double dphi_ST;
__device__ double dphi_TS;
__device__ double dphi_VT;
__device__ double dphi_TV;
__device__ double dphi_TT;
__device__ double delta_m_freq;
__device__ double gamma_Bs_freq;
__device__ double delta_gamma_freq;
__device__ double p0metac_tag_SSK;
__device__ double p0metac_tag_OS;
__device__ double Dp0half_tag_SSK;
__device__ double Dp0half_tag_OS;
__device__ double p1_tag_SSK;
__device__ double p1_tag_OS;
__device__ double Dp1half_tag_SSK;
__device__ double Dp1half_tag_OS;
__device__ double etac_tag_SSK;
__device__ double etac_tag_OS;
__device__ double deltatmean_tres_11;
__device__ double p0_tres_11;
__device__ double p1_tres_11;
__device__ double deltatmean_tres_12;
__device__ double p0_tres_12;
__device__ double p1_tres_12;
__device__ double mv;
__device__ double ms;
__device__ double mt;
__device__ double gv;
__device__ double gs;
__device__ double gt;
__device__ double c1_mass_swave;
__device__ double c2_mass_swave;
__device__ double c3_mass_swave;
__device__ double c4_mass_swave;
__device__ double c5_mass_swave;
__device__ double c6_mass_swave;
__device__ double c7_mass_swave;
__device__ double c8_mass_swave;
__device__ double c9_mass_swave;
__device__ double res_mass;
__device__ double tag_eff_SSK;
__device__ double mu1_SSK;
__device__ double sigma1_SSK;
__device__ double c_SSK;
__device__ double mu2_SSK;
__device__ double sigma2_SSK;
__device__ double tag_eff_OS;
__device__ double mu1_OS;
__device__ double sigma1_OS;
__device__ double c_OS;
__device__ double mu2_OS;
__device__ double sigma2_OS;
__device__ double gamma1_dt;
__device__ double beta1_dt;
__device__ double c_dt;
__device__ double gamma2_dt;
__device__ double beta2_dt;
__device__ double pw_mass_altmodel;
__device__ double f_1410_rel2_892;
__device__ double delta_1410_rel2_892;
__device__ double f_1680_rel2_892;
__device__ double delta_1680_rel2_892;
__device__ double Im00;
__device__ double Im01;
__device__ double Im10;
__device__ double Im02;
__device__ double Im20;
__device__ double Im11;
__device__ double Im12;
__device__ double Im21;
__device__ double Im22;
__device__ double Ih1Re;
__device__ double Ih2Re;
__device__ double Ih3Re;
__device__ double Ih4Re;
__device__ double Ih5Re;
__device__ double Ih6Re;
__device__ double Ih7Re;
__device__ double Ih8Re;
__device__ double Ih9Re;
__device__ double Ih10Re;
__device__ double Ih11Re;
__device__ double Ih12Re;
__device__ double Ih13Re;
__device__ double Ih14Re;
__device__ double Ih15Re;
__device__ double Ih16Re;
__device__ double Ih17Re;
__device__ double Ih18Re;
__device__ double Ih19Re;
__device__ double Ih20Re;
__device__ double Ih21Re;
__device__ double Ih22Re;
__device__ double Ih23Re;
__device__ double Ih24Re;
__device__ double Ih25Re;
__device__ double Ih26Re;
__device__ double Ih27Re;
__device__ double Ih28Re;
__device__ double Ih29Re;
__device__ double Ih30Re;
__device__ double Ih1Im;
__device__ double Ih2Im;
__device__ double Ih3Im;
__device__ double Ih4Im;
__device__ double Ih5Im;
__device__ double Ih6Im;
__device__ double Ih7Im;
__device__ double Ih8Im;
__device__ double Ih9Im;
__device__ double Ih10Im;
__device__ double Ih11Im;
__device__ double Ih12Im;
__device__ double Ih13Im;
__device__ double Ih14Im;
__device__ double Ih15Im;
__device__ double Ih16Im;
__device__ double Ih17Im;
__device__ double Ih18Im;
__device__ double Ih19Im;
__device__ double Ih20Im;
__device__ double Ih21Im;
__device__ double If1;
__device__ double If2;
__device__ double If3;
__device__ double If4;
__device__ double If5;
__device__ double If6;
__device__ double If7;
__device__ double If8;
__device__ double If9;
__device__ double If10;
__device__ double If11;
__device__ double If12;
__device__ double If13;
__device__ double If14;
__device__ double If15;
__device__ double If16;
__device__ double If17;
__device__ double If18;
__device__ double reAj1j2h_temp[3][3][5];
__device__ double imAj1j2h_temp[3][3][5];
__device__ double reAbarj1j2h_temp[3][3][5];
__device__ double imAbarj1j2h_temp[3][3][5];
__device__ pycuda::complex<double> Aj1j2h_temp(int j1, int j2, int h) {
return pycuda::complex<double>(reAj1j2h_temp[j1][j2][h],imAj1j2h_temp[j1][j2][h]);
}
__device__ pycuda::complex<double> Abarj1j2h_temp(int j1, int j2, int h) {
return pycuda::complex<double>(reAbarj1j2h_temp[j1][j2][h],imAbarj1j2h_temp[j1][j2][h]);
}
__device__ double T_cosh_temp[4][max_N_events];
__device__ double T_sinh_temp[4][max_N_events];
__device__ double T_cos_temp[4][max_N_events];
__device__ double T_sin_temp[4][max_N_events];
__device__ double IT_cosh_temp_deltat[4][max_N_events];
__device__ double IT_sinh_temp_deltat[4][max_N_events];
__device__ double IT_cos_temp_deltat[4][max_N_events];
__device__ double IT_sin_temp_deltat[4][max_N_events];
__device__ double zeta_temp[4][max_N_events];
__device__ double DCP_tzero_temp[4][max_N_events];
__device__ double fi_cos1_temp[18][4][max_N_events];
__device__ double fi_cos2_temp[18][4][max_N_events];
__device__ double gi_temp[15][4][max_N_events];
__device__ double reMj1j2_temp[3][3][4][max_N_events];
__device__ double imMj1j2_temp[3][3][4][max_N_events];
__device__ double phasespace_temp[4][max_N_events];
__device__ double reIhj1j2j1pj2pdict[3][3][3][3];
__device__ double imIhj1j2j1pj2pdict[3][3][3][3];
__device__ pycuda::complex<double> Mj1j2_temp(int j1, int j2, int icat, int iev) {
return pycuda::complex<double>(reMj1j2_temp[j1][j2][icat][iev],imMj1j2_temp[j1][j2][icat][iev]);
}
__device__ pycuda::complex<double> Ihj1j2j1pj2p(int j1, int j2, int j1p, int j2p) {
return pycuda::complex<double>(reIhj1j2j1pj2pdict[j1][j2][j1p][j2p],imIhj1j2j1pj2pdict[j1][j2][j1p][j2p]);
}
// ##########################################
// Toy MC generation variables
__device__ double knots_gen_wide[6] = {0.0,0.9,1.3,1.9,3.0,12.0};
__device__ double knots_gen_narrow[6] = {0.0,0.9,1.3,1.9,3.0,12.0};
__device__ double a_gen_wide[5][4] = {{0.0,-0.00138436998913,2.5481847953,-1.45909728079},
{-1.6653800648,5.54988251268,-3.61988951878,0.82537468739},
{-0.289336418837,2.37439717584,-1.17720849044,0.199046218586},
{0.993185871959,0.349361979846,-0.111400492548,0.0120623593064},
{1.32606052325,0.0164873285591,-0.000442275452223,-0.000266331481965}};
__device__ double a_gen_narrow[5][4] = {{0.0,0.00101382530285,4.89487359849,-2.83048035352},
{-3.54249846114,11.8093420291,-8.22549107238,2.02891396902},
{1.06333885612,1.18048668157,-0.0494484973637,-0.0675072040589},
{-0.421082535913,3.52430993215,-1.28303968188,0.148912301997},
{3.78015377185,-0.67692637561,0.117372420705,-0.006689042735}};
__device__ double k1_gen(int wide_window) {
if (wide_window) {return -0.40631262195;}
else {return -0.505556252411;}
}
__device__ double k2_gen(int wide_window) {
if (wide_window) {return -0.39861379722;}
else {return -0.404368705592;}
}
__device__ double k3_gen(int wide_window) {
if (wide_window) {return -0.0363987194893;}
else {return -0.0483750503137;}
}
__device__ double k4_gen(int wide_window) {
if (wide_window) {return -0.0644151228873;}
else {return -0.0175772310185;}
}
__device__ double k5_gen(int wide_window) {
if (wide_window) {return 0.0270906873059;}
else {return 0.0389936024545;}
}
__device__ double p1_gen(int wide_window) {
if (wide_window) {return -0.000100573256821;}
else {return 4.35273527839e-05;}
}
__device__ double knot_gen(int wide_window,int i) {
if (wide_window == 0) {return knots_gen_narrow[i];}
else {return knots_gen_wide[i];}
}
__device__ double coef_gen(int wide_window,int ibin,int deg) {
if (wide_window == 0) {return a_gen_narrow[ibin][deg];}
else {return a_gen_wide[ibin][deg];}
}
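// Generation-level decay-time acceptance: evaluate the cubic-spline piece for the proper-time bin containing tau.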
__device__ double accGenTime(double tau) {
int tau_bin;
if (tau < knot_gen(wide_window,1)) {tau_bin = 0;}
else if ((tau >= knot_gen(wide_window,1)) and (tau < knot_gen(wide_window,2))) {tau_bin = 1;}
else if ((tau >= knot_gen(wide_window,2)) and (tau < knot_gen(wide_window,3))) {tau_bin = 2;}
else if ((tau >= knot_gen(wide_window,3)) and (tau < knot_gen(wide_window,4))) {tau_bin = 3;}
else {tau_bin = 4;}
return coef_gen(wide_window,tau_bin,0)+tau*coef_gen(wide_window,tau_bin,1)+tau*tau*coef_gen(wide_window,tau_bin,2)+tau*tau*tau*coef_gen(wide_window,tau_bin,3);
}
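// Generation-level angular acceptance: Chebyshev polynomial expansion up to fifth order in the helicity-angle cosine.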
__device__ double accGenAng(double x) {
return 1.+k1_gen(wide_window)*x+k2_gen(wide_window)*(2.*x*x-1.)+k3_gen(wide_window)*(4.*x*x*x-3.*x)+k4_gen(wide_window)*(8.*x*x*x*x-8.*x*x+1.)+k5_gen(wide_window)*(16.*x*x*x*x*x-20.*x*x*x+5.*x);
}
__device__ double accGenMass(double m) {
return 1. + p1_gen(wide_window)*m;
}
__device__ double accGen(double tau, double ma, double mb, double cos1var, double cos2var, double phivar) {
return accGenTime(tau)*accGenMass(ma)*accGenMass(mb)*accGenAng(cos1var)*accGenAng(cos2var);
}
// ##########################################
// Physical terms
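// Look up the real and imaginary parts of the amplitude parameter for wave (j1,j2) and helicity state h.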
__device__ double reAj1j2h(int j1, int j2, int h) {
switch(j1) {
case 0 :
switch(j2) {
case 0 : return reA00;
case 1 : return reA01;
case 2 : return reA02;
}
case 1 :
switch(j2) {
case 0 : return reA10;
case 1 :
switch(h) {
case 0 : return reA110;
case 1 : return reA11par;
case 2 : return reA11perp;
}
case 2 :
switch(h) {
case 0 : return reA120;
case 1 : return reA12par;
case 2 : return reA12perp;
}
}
case 2 :
switch(j2) {
case 0 : return reA20;
case 1 :
switch(h) {
case 0 : return reA210;
case 1 : return reA21par;
case 2 : return reA21perp;
}
case 2 :
switch(h) {
case 0 : return reA220;
case 1 : return reA22par;
case 2 : return reA22perp;
case 3 : return reA22par2;
case 4 : return reA22perp2;
}
}
}
return 0.;
}
__device__ double imAj1j2h(int j1, int j2, int h) {
switch(j1) {
case 0 :
switch(j2) {
case 0 : return imA00;
case 1 : return imA01;
case 2 : return imA02;
}
case 1 :
switch(j2) {
case 0 : return imA10;
case 1 :
switch(h) {
case 0 : return imA110;
case 1 : return imA11par;
case 2 : return imA11perp;
}
case 2 :
switch(h) {
case 0 : return imA120;
case 1 : return imA12par;
case 2 : return imA12perp;
}
}
case 2 :
switch(j2) {
case 0 : return imA20;
case 1 :
switch(h) {
case 0 : return imA210;
case 1 : return imA21par;
case 2 : return imA21perp;
}
case 2 :
switch(h) {
case 0 : return imA220;
case 1 : return imA22par;
case 2 : return imA22perp;
case 3 : return imA22par2;
case 4 : return imA22perp2;
}
}
}
return 0.;
}
__device__ double DCPj1j2(int j1, int j2) {
switch(j1) {
case 0 :
switch(j2) {
case 0 : return DCP;//+DCP_SS;
case 1 : return DCP;//+DCP_SV;
case 2 : return DCP;//+DCP_TT;//DCP_ST;
}
case 1 :
switch(j2) {
case 0 : return DCP;//+DCP_VS;
case 1 : return DCP;
case 2 : return DCP;//+DCP_TT;//DCP_VT;
}
case 2 :
switch(j2) {
case 0 : return DCP;//+DCP_TT;//DCP_TS;
case 1 : return DCP;//+DCP_TT;//DCP_TV;
case 2 : return DCP;//+DCP_TT;
}
}
return 0;
}
__device__ double dphij1j2(int j1, int j2) {
switch(j1) {
case 0 :
switch(j2) {
case 0 : return dphi_SS;
case 1 : return dphi_SV;
case 2 : return dphi_ST;
}
case 1 :
switch(j2) {
case 0 : return dphi_VS;
case 1 : return 0.;
case 2 : return dphi_VT;
}
case 2 :
switch(j2) {
case 0 : return dphi_TS;
case 1 : return dphi_TV;
case 2 : return dphi_TT;
}
}
return 0;
}
__device__ double etah(int h) {
if ((h == 2) or (h == 4)) {return -1.;}
else {return 1.;}
}
__device__ double etaj1j2h(int j1, int j2, int h) {
return pow(-1.,j1+j2)*etah(h);
}
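// Bs decay amplitude for wave (j1,j2) and helicity h, including the direct CP-violation parameter and the weak phase phi_s (plus wave-dependent phase offset).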
__device__ pycuda::complex<double> Aj1j2h(int j1, int j2, int h) {
pycuda::complex<double> I(0.,1.);
return pycuda::complex<double>(reAj1j2h(j1,j2,h),imAj1j2h(j1,j2,h))*pycuda::complex<double>(sqrt(1.+DCPj1j2(j1,j2)))*exp(I*0.5*(phis+dphij1j2(j1,j2)));
}
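// Conjugate (Bsbar) amplitude: spins swapped (j1 <-> j2), CP eigenvalue factor applied, and the opposite weak phase.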
__device__ pycuda::complex<double> Abarj1j2h(int j1, int j2, int h) {
pycuda::complex<double> I(0.,1.);
return etaj1j2h(j2,j1,h)*pycuda::complex<double>(reAj1j2h(j2,j1,h),imAj1j2h(j2,j1,h))*pycuda::complex<double>(sqrt(1.-DCPj1j2(j2,j1)))*exp(-I*0.5*(phis+dphij1j2(j2,j1)));
}
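// Bilinear combinations of A and Abar used as the coefficients of the cosh, sinh, cos and sin decay-time functions, respectively.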
__device__ pycuda::complex<double> M_Average(int j1, int j2, int h, int j1p, int j2p, int hp) {
return Aj1j2h_temp(j1,j2,h)*pycuda::conj(Aj1j2h_temp(j1p,j2p,hp))+Abarj1j2h_temp(j1,j2,h)*pycuda::conj(Abarj1j2h_temp(j1p,j2p,hp));
}
__device__ pycuda::complex<double> M_DeltaGamma(int j1, int j2, int h, int j1p, int j2p, int hp) {
return Aj1j2h_temp(j1,j2,h)*pycuda::conj(Abarj1j2h_temp(j1p,j2p,hp))+Abarj1j2h_temp(j1,j2,h)*pycuda::conj(Aj1j2h_temp(j1p,j2p,hp));
}
__device__ pycuda::complex<double> M_DirCP(int j1, int j2, int h, int j1p, int j2p, int hp) {
return Aj1j2h_temp(j1,j2,h)*pycuda::conj(Aj1j2h_temp(j1p,j2p,hp))-Abarj1j2h_temp(j1,j2,h)*pycuda::conj(Abarj1j2h_temp(j1p,j2p,hp));
}
__device__ pycuda::complex<double> M_MixCP(int j1, int j2, int h, int j1p, int j2p, int hp) {
return pycuda::complex<double>(0.,-1.)*(Aj1j2h_temp(j1,j2,h)*pycuda::conj(Abarj1j2h_temp(j1p,j2p,hp))-Abarj1j2h_temp(j1,j2,h)*pycuda::conj(Aj1j2h_temp(j1p,j2p,hp)));
}
// ##########################################
// Flavour tagging terms
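// Calibrated mistag probabilities, linear in the per-event mistag estimate eta, with separate parameters for Bs (omega) and Bsbar (omegabar).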
__device__ double omega_SSK(double eta) {
return (p0metac_tag_SSK+etac_tag_SSK+Dp0half_tag_SSK)+(p1_tag_SSK+Dp1half_tag_SSK)*(eta-etac_tag_SSK);
}
__device__ double omegabar_SSK(double eta) {
return (p0metac_tag_SSK+etac_tag_SSK-Dp0half_tag_SSK)+(p1_tag_SSK-Dp1half_tag_SSK)*(eta-etac_tag_SSK);
}
__device__ double omega_OS(double eta) {
return (p0metac_tag_OS+etac_tag_OS+Dp0half_tag_OS)+(p1_tag_OS+Dp1half_tag_OS)*(eta-etac_tag_OS);
}
__device__ double omegabar_OS(double eta) {
return (p0metac_tag_OS+etac_tag_OS-Dp0half_tag_OS)+(p1_tag_OS-Dp1half_tag_OS)*(eta-etac_tag_OS);
}
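// Tagging probabilities for an initially produced Bs or Bsbar given the tag decisions
// q1 (SSK) and q2 (OS) and their mistag estimates; zeta is the per-event tagging
// normalisation and DCP_tzero the effective asymmetry at t = 0, including DCP_prod.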
__device__ double P_Bs(int q1, int q2, double eta1, double eta2) {
return (1.+0.5*q1*(1.-q1-2.*omega_SSK(eta1)))*(1.+0.5*q2*(1.-q2-2.*omega_OS(eta2)));
}
__device__ double P_Bsbar(int q1, int q2, double eta1, double eta2) {
return (1.-0.5*q1*(1.+q1-2.*omegabar_SSK(eta1)))*(1.-0.5*q2*(1.+q2-2.*omegabar_OS(eta2)));
}
__device__ double zeta(int q1, int q2, double eta1, double eta2) {
return 0.5*((1.+DCP_prod)*P_Bs(q1,q2,eta1,eta2)+(1.-DCP_prod)*P_Bsbar(q1,q2,eta1,eta2));
}
__device__ double DCP_tzero(int q1, int q2, double eta1, double eta2) {
return 0.5/zeta(q1,q2,eta1,eta2)*((1.+DCP_prod)*P_Bs(q1,q2,eta1,eta2)-(1.-DCP_prod)*P_Bsbar(q1,q2,eta1,eta2));
}
// ##########################################
// Time dependent terms
#define errf_const 1.12837916709551
#define xLim 5.33
#define yLim 4.29
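// Complex error (Faddeeva) function w(z), evaluated with a scheme of the CERNLIB
// WWERF type: a truncated series/continued fraction inside the (xLim, yLim) box and an
// asymptotic continued fraction outside it.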
__device__ pycuda::complex<double> faddeeva(pycuda::complex<double> z) {
double in_real = pycuda::real(z);
double in_imag = pycuda::imag(z);
int n, nc, nu;
double h, q, Saux, Sx, Sy, Tn, Tx, Ty, Wx, Wy, xh, xl, x, yh, y;
double Rx [33];
double Ry [33];
x = fabs(in_real);
y = fabs(in_imag);
if (y < yLim && x < xLim) {
q = (1.0 - y / yLim) * sqrt(1.0 - (x / xLim) * (x / xLim));
h = 1.0 / (3.2 * q);
nc = 7 + int(23.0 * q);
xl = pow(h, double(1 - nc));
xh = y + 0.5 / h;
yh = x;
nu = 10 + int(21.0 * q);
Rx[nu] = 0.;
Ry[nu] = 0.;
for (n = nu; n > 0; n--){
Tx = xh + n * Rx[n];
Ty = yh - n * Ry[n];
Tn = Tx*Tx + Ty*Ty;
Rx[n-1] = 0.5 * Tx / Tn;
Ry[n-1] = 0.5 * Ty / Tn;
}
Sx = 0.;
Sy = 0.;
for (n = nc; n>0; n--){
Saux = Sx + xl;
Sx = Rx[n-1] * Saux - Ry[n-1] * Sy;
Sy = Rx[n-1] * Sy + Ry[n-1] * Saux;
xl = h * xl;
};
Wx = errf_const * Sx;
Wy = errf_const * Sy;
}
else {
xh = y;
yh = x;
Rx[0] = 0.;
Ry[0] = 0.;
for (n = 9; n>0; n--){
Tx = xh + n * Rx[0];
Ty = yh - n * Ry[0];
Tn = Tx * Tx + Ty * Ty;
Rx[0] = 0.5 * Tx / Tn;
Ry[0] = 0.5 * Ty / Tn;
};
Wx = errf_const * Rx[0];
Wy = errf_const * Ry[0];
}
if (y == 0.) {
Wx = exp(-x * x);
}
if (in_imag < 0.) {
Wx = 2.0 * exp(y * y - x * x) * cos(2.0 * x * y) - Wx;
Wy = - 2.0 * exp(y * y - x * x) * sin(2.0 * x * y) - Wy;
if (in_real > 0.) {
Wy = -Wy;
}
}
else if (in_real < 0.) {
Wy = -Wy;
}
return pycuda::complex<double>(Wx,Wy);
}
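// Decay term convolved with a unit-width Gaussian, expressed through the Faddeeva
// function; the else-branch uses an asymptotic form to keep the evaluation stable when
// Re(z) - x is very negative.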
__device__ pycuda::complex<double> conv_exp(double x, pycuda::complex<double> z) {
double re = pycuda::real(z)-x;
if (re>-5.0) {return 0.5*faddeeva(pycuda::complex<double>(-pycuda::real(z),re))*exp(-x*x);}
else {
pycuda::complex<double> mi(0,-1);
pycuda::complex<double> zp = mi*(z-x);
pycuda::complex<double> zsq = zp*zp;
pycuda::complex<double> v = -zsq -x*x;
pycuda::complex<double> iz(pycuda::real(z)+x,pycuda::real(z)-x);
return 0.5*exp(v)*(exp(zsq)/(iz*sqrt(pi)) + 1.)*2. ;
}
}
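// Kn and Mn are the building blocks for integrating the Gaussian-convolved time terms
// against a polynomial (spline) decay-time acceptance of order n.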
__device__ pycuda::complex<double> Kn(pycuda::complex<double> z, int n) {
if (n == 0) {return 1./(2.*z);}
else if (n == 1) {return 1./(2.*z*z);}
else if (n == 2) {return 1./z*(1.+1./(z*z));}
else if (n == 3) {return 3./(z*z)*(1.+1./(z*z));}
return pycuda::complex<double>(0.,0.);
}
__device__ pycuda::complex<double> Mn_x(double x, pycuda::complex<double> z, int n) {
if (n == 0) {return pycuda::complex<double>(erf(x),0.)-2.*conv_exp(x,z);}
else if (n == 1) {return 2.*(-pycuda::complex<double>(sqrt(1./pi)*exp(-x*x),0.)-2.*x*conv_exp(x,z));}
else if (n == 2) {return 2.*(-2.*x*exp(-x*x)*pycuda::complex<double>(sqrt(1./pi),0.)-(2.*x*x-1.)*2.*conv_exp(x,z));}
else if (n == 3) {return 4.*(-(2.*x*x-1.)*exp(-x*x)*pycuda::complex<double>(sqrt(1./pi),0.)-x*(2.*x*x-3.)*2.*conv_exp(x,z));}
return pycuda::complex<double>(0.,0.);
}
__device__ pycuda::complex<double> Mn(double x_1, double x_2, pycuda::complex<double> z, int n) {
return Mn_x(x_2,z,n)-Mn_x(x_1,z,n);
}
__device__ pycuda::complex<double> Tj1j2hj1pj2php(int j1, int j2, int h, int j1p, int j2p, int hp, int icat, int iev) {
return zeta_temp[icat][iev]*((T_cosh_temp[icat][iev]*M_Average(j1,j2,h,j1p,j2p,hp)-T_sinh_temp[icat][iev]*M_DeltaGamma(j1,j2,h,j1p,j2p,hp))+DCP_tzero_temp[icat][iev]*(T_cos_temp[icat][iev]*M_DirCP(j1,j2,h,j1p,j2p,hp)+T_sin_temp[icat][iev]*M_MixCP(j1,j2,h,j1p,j2p,hp)));
}
__device__ pycuda::complex<double> ITj1j2hj1pj2php_deltat(int j1, int j2, int h, int j1p, int j2p, int hp, int icat, int iev) {
return (IT_cosh_temp_deltat[icat][iev]*M_Average(j1,j2,h,j1p,j2p,hp)-IT_sinh_temp_deltat[icat][iev]*M_DeltaGamma(j1,j2,h,j1p,j2p,hp))+DCP_prod*(IT_cos_temp_deltat[icat][iev]*M_DirCP(j1,j2,h,j1p,j2p,hp)+IT_sin_temp_deltat[icat][iev]*M_MixCP(j1,j2,h,j1p,j2p,hp));
}
// ##########################################
// Angular terms
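// fi(cos_theta, i) and gi(phi, i) are the angular basis functions whose products form the
// angular dependence of each amplitude interference term.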
__device__ double fi(double x, int i) {
switch(i) {
case 1 : return 1.;
case 2 : return x;
case 3 : return sqrt(1.-x*x);
case 4 : return 3.*x*x-1.;
case 5 : return x*sqrt(1.-x*x);
case 6 : return x*x;
case 7 : return x*(3.*x*x-1.);
case 8 : return x*x*sqrt(1.-x*x);
case 9 : return 1.-x*x;
case 10 : return (3.*x*x-1.)*sqrt(1.-x*x);
case 11 : return x*(1.-x*x);
case 12 : return (3.*x*x-1.)*(3.*x*x-1.);
case 13 : return x*(3.*x*x-1.)*sqrt(1.-x*x);
case 14 : return x*x*(1.-x*x);
case 15 : return (1.-x*x)*sqrt(1.-x*x);
case 16 : return (3.*x*x-1.)*(1.-x*x);
case 17 : return x*(1.-x*x)*sqrt(1.-x*x);
case 18 : return (1.-x*x)*(1.-x*x);
}
return 0.;
}
__device__ double gi(double x, int i) {
switch(i) {
case 1 : return 1.;
case 2 : return cos(x);
case 3 : return sin(x);
case 4 : return cos(x)*cos(x);
case 5 : return sin(x)*cos(x);
case 6 : return sin(x)*sin(x);
case 7 : return cos(2.*x);
case 8 : return sin(2.*x);
case 9 : return cos(x)*cos(2.*x);
case 10 : return cos(x)*sin(2.*x);
case 11 : return sin(x)*cos(2.*x);
case 12 : return sin(x)*sin(2.*x);
case 13 : return cos(2.*x)*cos(2.*x);
case 14 : return sin(2.*x)*cos(2.*x);
case 15 : return sin(2.*x)*sin(2.*x);
}
return 0.;
}
__device__ double fjjphhp_cos1(int j, int jp, int h, int hp, int icat, int iev) {
return fi_cos1_temp[(int) fjjphhpindexdict[j][jp][h][hp]-1][icat][iev];
}
__device__ double fjjphhp_cos2(int j, int jp, int h, int hp, int icat, int iev) {
return fi_cos2_temp[(int) fjjphhpindexdict[j][jp][h][hp]-1][icat][iev];
}
__device__ double ghhp_phi(int h, int hp, int icat, int iev) {
return gi_temp[(int) ghhpindexdict[h][hp]-1][icat][iev];
}
__device__ pycuda::complex<double> Nj1j2hj1pj2php(int j1, int j2, int h, int j1p, int j2p, int hp) {
return Nj1j2hdict(j1,j2,h)*pycuda::conj(Nj1j2hdict(j1p,j2p,hp));
}
// ##########################################
// Mass dependent terms
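// Two-body breakup momentum of a system of mass M decaying into masses ma and mb.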
__device__ double get_q(double M, double ma, double mb) {
double M2 = M*M;
double m12 = ma*ma;
double m22 = mb*mb;
double q2 = .25*( M2*M2 - 2*M2*(m12+m22) +(m12*m12+m22*m22)-2*m12*m22) /M2;
if (q2<0) {return 0.;}
return sqrt(q2);
}
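// Squared Blatt-Weisskopf barrier-factor ratio for orbital angular momentum L, with the
// radius parameter d fixed at 1.6e-3 (in inverse units of q).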
__device__ double Blatt_Weisskopf2(double q, double q0, int L) {
if (L<1.) {return 1.;}
double d = 1.6e-03;
double z = q*d*q*d;
double z0 = q0*d*q0*d;
if (L==1) {return (1+z0)/(1+z);}
else if (L==2) {return ((z0-3)*(z0-3) + 9*z0) / ((z-3)*(z-3) + 9*z);}
 else if (L==3) {return (z0*(z0-15)*(z0-15) + 9*(2*z0-5)*(2*z0-5)) / (z*(z-15)*(z-15) + 9*(2*z-5)*(2*z-5));}
return ( pow(z0*z0 -45*z0+105,2) +25*z0*(2*z0-21)*(2*z0-21)) /(pow(z*z -45*z+105,2) +25*z*(2*z-21)*(2*z-21));
}
__device__ double FL_j1j2(int j1, int j2, double ma, double mb) {
double p = get_q(MBs,ma,mb);
double q_1 = get_q(ma,MPion,MKaon);
double q_2 = get_q(mb,MPion,MKaon);
double m0 = 895.81;
double p0 = get_q(MBs,m0,m0);
double q0 = get_q(m0,MPion,MKaon);
//int L;
//if (j1*j2>0) {L = abs(j1-j2)+1;}
//else {L = abs(j1-j2);}
int L = abs(j1-j2);
double FL_Bs = pow(p/p0,L)*sqrt(Blatt_Weisskopf2(p,p0,L));
double FL_Kpi1 = pow((q_1/q0),j1)*sqrt(Blatt_Weisskopf2(q_1,q0,j1));
double FL_Kpi2 = pow((q_2/q0),j2)*sqrt(Blatt_Weisskopf2(q_2,q0,j2));
return FL_Bs*FL_Kpi1*FL_Kpi2;
}
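// Relativistic spin-J Breit-Wigner with mass-dependent width and Blatt-Weisskopf
// correction; the fixed phase rotations are applied to the J = 1 and J = 2 propagators.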
__device__ pycuda::complex<double> Resonance(double m, double m0, double g0, int J) {
double q = get_q(m,MPion,MKaon);
double q0 = get_q(m0,MPion,MKaon);
double gamma = g0*pow(q/q0,2*J+1)*(m0/m)*Blatt_Weisskopf2(q,q0,J);
pycuda::complex<double> num(m0*g0,0.);
pycuda::complex<double> denom(m0*m0-m*m,-m0*gamma);
pycuda::complex<double> BW = num/denom;
pycuda::complex<double> I(0.,1.);
if (J == 1) {return BW*exp(-I*1.5707963267948966);}
else if (J == 2) {return BW*exp(-I*0.006008360479292941);}
return BW;
}
__device__ double omega_Stheo(double m) {
double m_GeV = m/1000.;
double svar_GeV = m_GeV*m_GeV;
double Delta_Kpi = MKaon/1000.*MKaon/1000.-MPion/1000.*MPion/1000.;
double y_s = pow((svar_GeV-Delta_Kpi)/(svar_GeV+Delta_Kpi),2);
double y_s0 = pow((s0_Stheo-Delta_Kpi)/(s0_Stheo+Delta_Kpi),2);
return (sqrt(y_s)-alpha_Stheo*sqrt(y_s0-y_s))/(sqrt(y_s)+alpha_Stheo*sqrt(y_s0-y_s));
}
__device__ double cotdelta_Stheo(double m) {
double m_GeV = m/1000.;
double svar_GeV = m_GeV*m_GeV;
double q_Kpi_GeV = get_q(m,MKaon,MPion)/1000.;
double omega_func = omega_Stheo(m);
return m_GeV/(2.*q_Kpi_GeV*(svar_GeV-sAdler_Stheo))*(B0_Stheo+B1_Stheo*omega_func);
}
__device__ double get_p1_Stheo(double q) {
return 1.+a_Stheo*q/1000.*q/1000.+b_Stheo*q/1000.*q/1000.*q/1000.*q/1000.;
}
__device__ double get_p2_Stheo(double q) {
return 1.+c_Stheo*q/1000.*q/1000.;
}
__device__ double Theta_Keta(double m) {
if (m>=(MKaon+MEta)) {return 1.;}
return 0.;
}
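// K-pi S-wave propagator: an elastic phase-shift description below the K-eta threshold
// and a two-resonance S-matrix form above it, multiplied by a Chebyshev-polynomial
// modulus correction in the reduced mass variable xm.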
__device__ pycuda::complex<double> Prop_Stheo(double m) {
pycuda::complex<double> I(0.,1.);
double m_GeV = m/1000.;
double svar_GeV = m_GeV*m_GeV;
double q_Kpi_GeV = get_q(m,MKaon,MPion)/1000.;
double q_Keta_GeV = get_q(m,MKaon,MEta)/1000.;
pycuda::complex<double> T;
if (m<(MKaon+MEta)) {
T = 1./(cotdelta_Stheo(m)-I);
}
else {
pycuda::complex<double> S0b = exp(I*2.*q_Keta_GeV*(phi0_Stheo+phi1_Stheo*q_Keta_GeV*q_Keta_GeV));
double q_Kpi_r1_GeV = get_q(sqrtsr1_Stheo*1000.,MKaon,MPion)/1000.;
double q_Keta_r1_GeV = get_q(sqrtsr1_Stheo*1000.,MKaon,MEta)/1000.;
double q_Kpi_r2_GeV = get_q(sqrtsr2_Stheo*1000.,MKaon,MPion)/1000.;
double q_Keta_r2_GeV = get_q(sqrtsr2_Stheo*1000.,MKaon,MEta)/1000.;
double q_Kpi_hat_GeV = get_q(MKaon+MEta,MKaon,MPion)/1000.;
double beta_Stheo = 1./cotdelta_Stheo(MKaon+MEta);
double P1_s = (sqrtsr1_Stheo*sqrtsr1_Stheo-svar_GeV)*beta_Stheo+e1_Stheo*G1_Stheo*(get_p1_Stheo(q_Kpi_GeV*1000.)*(q_Kpi_GeV-q_Kpi_hat_GeV))/(get_p1_Stheo(q_Kpi_r1_GeV*1000.)*(q_Kpi_r1_GeV-q_Kpi_hat_GeV));
double Q1_s = (1.-e1_Stheo)*G1_Stheo*get_p1_Stheo(q_Kpi_GeV*1000.)/get_p1_Stheo(q_Kpi_r1_GeV*1000.)*q_Keta_GeV/q_Keta_r1_GeV*Theta_Keta(m);
double P2_s = e2_Stheo*G2_Stheo*(get_p2_Stheo(q_Kpi_GeV*1000.)*(q_Kpi_GeV-q_Kpi_hat_GeV))/(get_p2_Stheo(q_Kpi_r2_GeV*1000.)*(q_Kpi_r2_GeV-q_Kpi_hat_GeV));
double Q2_s = (1.-e2_Stheo)*G2_Stheo*get_p2_Stheo(q_Kpi_GeV*1000.)/get_p2_Stheo(q_Kpi_r2_GeV*1000.)*q_Keta_GeV/q_Keta_r2_GeV*Theta_Keta(m);
pycuda::complex<double> S1r = (sqrtsr1_Stheo*sqrtsr1_Stheo-svar_GeV+I*(P1_s-Q1_s))/(sqrtsr1_Stheo*sqrtsr1_Stheo-svar_GeV-I*(P1_s+Q1_s));
pycuda::complex<double> S2r = (sqrtsr2_Stheo*sqrtsr2_Stheo-svar_GeV+I*(P2_s-Q2_s))/(sqrtsr2_Stheo*sqrtsr2_Stheo-svar_GeV-I*(P2_s+Q2_s));
T = (S0b*S1r*S2r-1.)/(2.*I);
}
double xm = (m-1175.)/425.;
double modulus = 1.+c1_mass_swave*xm+c2_mass_swave*(2.*xm*xm-1.)+c3_mass_swave*(4.*xm*xm*xm-3.*xm)+c4_mass_swave*(8.*xm*xm*xm*xm-8.*xm*xm+1.);
return pycuda::complex<double>(modulus)*exp(I*(pycuda::arg(T)-0.7095863518296103));
}
__device__ pycuda::complex<double> Prop_ModInd(double m) {
double xm = (m-1175.)/425.;
double re_T = 1.+c1_mass_swave*xm+c2_mass_swave*(2.*xm*xm-1.)+c3_mass_swave*(4.*xm*xm*xm-3.*xm)+c4_mass_swave*(8.*xm*xm*xm*xm-8.*xm*xm+1.);
double im_T = c5_mass_swave+c6_mass_swave*xm+c7_mass_swave*(2.*xm*xm-1.)+c8_mass_swave*(4.*xm*xm*xm-3.*xm)+c9_mass_swave*(8.*xm*xm*xm*xm-8.*xm*xm+1.);
pycuda::complex<double> T(re_T,im_T);
return T;
}
__device__ pycuda::complex<double> Prop_S_Palano(double m) {
pycuda::complex<double> i(0.,1.);
double m_GeV = m/1000.;
double svar_GeV = m_GeV*m_GeV;
double q_Kpi_GeV = get_q(m,MKaon,MPion)/1000.;
double q_Keta_GeV = get_q(m,MKaon,MEta)/1000.;
double rho_1 = 2.*q_Kpi_GeV/m_GeV;
double rho_2 = 2.*q_Keta_GeV/m_GeV;
double sbot_GeV = 0.36;
double stop_GeV = 5.832;
double X = (2.*svar_GeV-(stop_GeV+sbot_GeV))/(stop_GeV-sbot_GeV);
double K11 = (svar_GeV-s_A_palano)/s_Kpi_palano*(g_1_a_palano*g_1_a_palano/(svar_GeV-s_a_palano)+g_1_b_palano*g_1_b_palano/(svar_GeV-s_b_palano)+C_11_0_palano+C_11_1_palano*X+C_11_2_palano*X*X+C_11_3_palano*X*X*X);
double K12 = (svar_GeV-s_A_palano)/s_Kpi_palano*(g_1_a_palano*g_2_a_palano/(svar_GeV-s_a_palano)+g_1_b_palano*g_2_b_palano/(svar_GeV-s_b_palano)+C_12_0_palano+C_12_1_palano*X+C_12_2_palano*X*X+C_12_3_palano*X*X*X);
double K22 = (svar_GeV-s_A_palano)/s_Kpi_palano*(g_2_a_palano*g_2_a_palano/(svar_GeV-s_a_palano)+g_2_b_palano*g_2_b_palano/(svar_GeV-s_b_palano)+C_22_0_palano+C_22_1_palano*X+C_22_2_palano*X*X+C_22_3_palano*X*X*X);
double detK = K11*K22-K12*K12;
pycuda::complex<double> Delta(1.-rho_1*rho_2*detK,-rho_1*K11-rho_2*K22);
pycuda::complex<double> T11_hat = s_Kpi_palano/(svar_GeV-s_A_palano)*(K11-rho_2*detK)/Delta;
pycuda::complex<double> T12_hat = s_Kpi_palano/(svar_GeV-s_A_palano)*K12/Delta;
double xm = X;//(m-1175.)/425.;
double alpha_1_s = 1.+c1_mass_swave*xm+c2_mass_swave*(2.*xm*xm-1.)+c3_mass_swave*(4.*xm*xm*xm-3.*xm)+c4_mass_swave*(8.*xm*xm*xm*xm-8.*xm*xm+1.);
double alpha_2_s = c5_mass_swave+c6_mass_swave*xm+c7_mass_swave*(2.*xm*xm-1.)+c8_mass_swave*(4.*xm*xm*xm-3.*xm)+c9_mass_swave*(8.*xm*xm*xm*xm-8.*xm*xm+1.);
pycuda::complex<double> T = alpha_1_s*T11_hat+alpha_2_s*T12_hat;
return T*exp(i*3.06573);
}
__device__ pycuda::complex<double> Prop_Lass(double m) {
pycuda::complex<double> i(0,1);
double a_lass_ = 1./c1_mass_swave;
double r_lass_ = c2_mass_swave;
double m0_ = c3_mass_swave;
double g0_ = c4_mass_swave;
double q = get_q(m,MPion,MKaon);
double q0 = get_q(m0_,MPion,MKaon);
double cotg_deltaB = 1./(a_lass_*q)+0.5*r_lass_*q;
double deltaB = atan(1./cotg_deltaB);
pycuda::complex<double> expo = exp(i*2.*deltaB);
double gamma = g0_*(q/q0)*(m0_/m);
double cotg_deltaR = (m0_*m0_-m*m)/(m0_*gamma);
pycuda::complex<double> T = 1./(cotg_deltaB-i)+expo/(cotg_deltaR-i);
return T;
}
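// Mass propagator of a single K-pi system with spin ji, rotated to have zero phase at the
// reference mass mv.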
__device__ pycuda::complex<double> Mji(double m, int ji) {
pycuda::complex<double> i(0,1);
 pycuda::complex<double> T(0.,0.); // initialised so that an unexpected ji value returns zero instead of an indeterminate amplitude
if (ji == 0)
{
T = Prop_Stheo(m)*exp(-i*pycuda::arg(Prop_Stheo(mv)));
}
else if (ji == 1)
{
T = Resonance(m,mv,gv,1)*exp(-i*pycuda::arg(Resonance(mv,mv,gv,1)));
//T = (Resonance(m,mv,gv,1)+pycuda::complex<double>(c5_mass_swave,c6_mass_swave)*Resonance(m,MKst_1_1410,GKst_1_1410,1)+pycuda::complex<double>(c7_mass_swave,c8_mass_swave)*Resonance(m,MKst_1_1680,GKst_1_1680,1))*exp(-i*pycuda::arg(Resonance(mv,mv,gv,1)+pycuda::complex<double>(c5_mass_swave,c6_mass_swave)*Resonance(mv,MKst_1_1410,GKst_1_1410,1)+pycuda::complex<double>(c7_mass_swave,c8_mass_swave)*Resonance(mv,MKst_1_1680,GKst_1_1680,1)));
}
else if (ji == 2)
{
T = Resonance(m,mt,gt,2)*exp(-i*pycuda::arg(Resonance(mv,mt,gt,2)));
}
return T;
}
__device__ pycuda::complex<double> Mj1j2(double ma, double mb, int j1, int j2) {
double scale_factor = 1.;
if ((j1 == 0) and (j2 == 0)) {return Mji(ma,0)*Mji(mb,0)*FL_j1j2(0,0,ma,mb)*(scale_factor/sqrt(Im00));}
else if ((j1 == 0) and (j2 == 1)) {return Mji(ma,0)*Mji(mb,1)*FL_j1j2(0,1,ma,mb)*(scale_factor/sqrt(Im01));}
else if ((j1 == 1) and (j2 == 0)) {return Mji(ma,1)*Mji(mb,0)*FL_j1j2(1,0,ma,mb)*(scale_factor/sqrt(Im10));}
else if ((j1 == 0) and (j2 == 2)) {return Mji(ma,0)*Mji(mb,2)*FL_j1j2(0,2,ma,mb)*(scale_factor/sqrt(Im02));}
else if ((j1 == 2) and (j2 == 0)) {return Mji(ma,2)*Mji(mb,0)*FL_j1j2(2,0,ma,mb)*(scale_factor/sqrt(Im20));}
else if ((j1 == 1) and (j2 == 1)) {return Mji(ma,1)*Mji(mb,1)*FL_j1j2(1,1,ma,mb)*(scale_factor/sqrt(Im11));}
else if ((j1 == 1) and (j2 == 2)) {return Mji(ma,1)*Mji(mb,2)*FL_j1j2(1,2,ma,mb)*(scale_factor/sqrt(Im12));}
else if ((j1 == 2) and (j2 == 1)) {return Mji(ma,2)*Mji(mb,1)*FL_j1j2(2,1,ma,mb)*(scale_factor/sqrt(Im21));}
else if ((j1 == 2) and (j2 == 2)) {return Mji(ma,2)*Mji(mb,2)*FL_j1j2(2,2,ma,mb)*(scale_factor/sqrt(Im22));}
return pycuda::complex<double>(0.,0.);
}
__device__ pycuda::complex<double> Mj1j2_unnorm(double ma, double mb, int j1, int j2) {
if ((j1 == 0) and (j2 == 0)) {return Mji(ma,0)*Mji(mb,0)*FL_j1j2(0,0,ma,mb);}
else if ((j1 == 0) and (j2 == 1)) {return Mji(ma,0)*Mji(mb,1)*FL_j1j2(0,1,ma,mb);}
else if ((j1 == 1) and (j2 == 0)) {return Mji(ma,1)*Mji(mb,0)*FL_j1j2(1,0,ma,mb);}
else if ((j1 == 0) and (j2 == 2)) {return Mji(ma,0)*Mji(mb,2)*FL_j1j2(0,2,ma,mb);}
else if ((j1 == 2) and (j2 == 0)) {return Mji(ma,2)*Mji(mb,0)*FL_j1j2(2,0,ma,mb);}
else if ((j1 == 1) and (j2 == 1)) {return Mji(ma,1)*Mji(mb,1)*FL_j1j2(1,1,ma,mb);}
else if ((j1 == 1) and (j2 == 2)) {return Mji(ma,1)*Mji(mb,2)*FL_j1j2(1,2,ma,mb);}
else if ((j1 == 2) and (j2 == 1)) {return Mji(ma,2)*Mji(mb,1)*FL_j1j2(2,1,ma,mb);}
else if ((j1 == 2) and (j2 == 2)) {return Mji(ma,2)*Mji(mb,2)*FL_j1j2(2,2,ma,mb);}
return pycuda::complex<double>(0.,0.);
}
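// Phase-space factor: product of the two K-pi breakup momenta and the Bs breakup momentum.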
__device__ double phasespace(double ma, double mb) {
double Q1 = get_q(ma,MKaon,MPion);
double Q2 = get_q(mb,MKaon,MPion);
double QB = get_q(MBs,ma,mb);
double phsp = Q1*Q2*QB;
return phsp;
}
__device__ pycuda::complex<double> hj1j2j1pj2p(int j1, int j2, int j1p, int j2p, int icat, int iev) {
return Mj1j2_temp(j1,j2,icat,iev)*pycuda::conj(Mj1j2_temp(j1p,j2p,icat,iev))*phasespace_temp[icat][iev];
}
// ##########################################
// PDF elements
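// comp_num_fit gives one interference term of the per-event signal PDF; comp_den_fit gives
// the corresponding term of its normalisation, built from the precomputed weights nw_comp.
// num_fit/den_fit sum all independent terms, with crossed terms counted twice.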
__device__ double comp_num_fit(int j1, int j2, int h, int j1p, int j2p, int hp, int icat, int iev) {
return pycuda::real(Tj1j2hj1pj2php(j1,j2,h,j1p,j2p,hp,icat,iev)*Nj1j2hj1pj2php(j1,j2,h,j1p,j2p,hp)*hj1j2j1pj2p(j1,j2,j1p,j2p,icat,iev))*ghhp_phi(h,hp,icat,iev)*fjjphhp_cos1(j1,j1p,h,hp,icat,iev)*fjjphhp_cos2(j2,j2p,h,hp,icat,iev);
}
__device__ double comp_den_fit(int j1, int j2, int h, int j1p, int j2p, int hp, int icat, int iev) {
return pycuda::real(ITj1j2hj1pj2php_deltat(j1,j2,h,j1p,j2p,hp,icat,iev)*pycuda::complex<double>(nw_comp(year_opt[icat],trig_opt[icat],j1,j2,h,j1p,j2p,hp,0),nw_comp(year_opt[icat],trig_opt[icat],j1,j2,h,j1p,j2p,hp,1)));
}
__device__ double num_fit(int icat, int iev) {
return comp_num_fit(0,0,0,0,0,0,icat,iev)+comp_num_fit(0,1,0,0,1,0,icat,iev)+comp_num_fit(0,2,0,0,2,0,icat,iev)+comp_num_fit(1,0,0,1,0,0,icat,iev)+comp_num_fit(1,1,0,1,1,0,icat,iev)+comp_num_fit(1,1,1,1,1,1,icat,iev)
+comp_num_fit(1,1,2,1,1,2,icat,iev)+comp_num_fit(1,2,0,1,2,0,icat,iev)+comp_num_fit(1,2,1,1,2,1,icat,iev)+comp_num_fit(1,2,2,1,2,2,icat,iev)+comp_num_fit(2,0,0,2,0,0,icat,iev)+comp_num_fit(2,1,0,2,1,0,icat,iev)+comp_num_fit(2,1,1,2,1,1,icat,iev)
+comp_num_fit(2,1,2,2,1,2,icat,iev)+comp_num_fit(2,2,0,2,2,0,icat,iev)+comp_num_fit(2,2,1,2,2,1,icat,iev)+comp_num_fit(2,2,2,2,2,2,icat,iev)+comp_num_fit(2,2,3,2,2,3,icat,iev)+comp_num_fit(2,2,4,2,2,4,icat,iev)+2.*comp_num_fit(0,1,0,0,0,0,icat,iev)
+2.*comp_num_fit(0,1,0,1,0,0,icat,iev)+2.*comp_num_fit(0,1,0,2,0,0,icat,iev)+2.*comp_num_fit(0,2,0,0,0,0,icat,iev)+2.*comp_num_fit(0,2,0,0,1,0,icat,iev)+2.*comp_num_fit(0,2,0,1,0,0,icat,iev)+2.*comp_num_fit(0,2,0,1,1,0,icat,iev)
+2.*comp_num_fit(0,2,0,2,0,0,icat,iev)+2.*comp_num_fit(0,2,0,2,1,0,icat,iev)+2.*comp_num_fit(1,0,0,0,0,0,icat,iev)+2.*comp_num_fit(1,1,0,0,0,0,icat,iev)+2.*comp_num_fit(1,1,0,0,1,0,icat,iev)+2.*comp_num_fit(1,1,0,1,0,0,icat,iev)
+2.*comp_num_fit(1,1,0,2,0,0,icat,iev)+2.*comp_num_fit(1,1,1,0,0,0,icat,iev)+2.*comp_num_fit(1,1,1,0,1,0,icat,iev)+2.*comp_num_fit(1,1,1,0,2,0,icat,iev)+2.*comp_num_fit(1,1,1,1,0,0,icat,iev)+2.*comp_num_fit(1,1,1,1,1,0,icat,iev)
+2.*comp_num_fit(1,1,1,1,2,0,icat,iev)+2.*comp_num_fit(1,1,1,2,0,0,icat,iev)+2.*comp_num_fit(1,1,1,2,1,0,icat,iev)+2.*comp_num_fit(1,1,1,2,2,0,icat,iev)+2.*comp_num_fit(1,1,2,0,0,0,icat,iev)+2.*comp_num_fit(1,1,2,0,1,0,icat,iev)
+2.*comp_num_fit(1,1,2,0,2,0,icat,iev)+2.*comp_num_fit(1,1,2,1,0,0,icat,iev)+2.*comp_num_fit(1,1,2,1,1,0,icat,iev)+2.*comp_num_fit(1,1,2,1,1,1,icat,iev)+2.*comp_num_fit(1,1,2,1,2,0,icat,iev)+2.*comp_num_fit(1,1,2,1,2,1,icat,iev)
+2.*comp_num_fit(1,1,2,2,0,0,icat,iev)+2.*comp_num_fit(1,1,2,2,1,0,icat,iev)+2.*comp_num_fit(1,1,2,2,1,1,icat,iev)+2.*comp_num_fit(1,1,2,2,2,0,icat,iev)+2.*comp_num_fit(1,1,2,2,2,1,icat,iev)+2.*comp_num_fit(1,2,0,0,0,0,icat,iev)
+2.*comp_num_fit(1,2,0,0,1,0,icat,iev)+2.*comp_num_fit(1,2,0,0,2,0,icat,iev)+2.*comp_num_fit(1,2,0,1,0,0,icat,iev)+2.*comp_num_fit(1,2,0,1,1,0,icat,iev)+2.*comp_num_fit(1,2,0,2,0,0,icat,iev)+2.*comp_num_fit(1,2,0,2,1,0,icat,iev)
+2.*comp_num_fit(1,2,1,0,0,0,icat,iev)+2.*comp_num_fit(1,2,1,0,1,0,icat,iev)+2.*comp_num_fit(1,2,1,0,2,0,icat,iev)+2.*comp_num_fit(1,2,1,1,0,0,icat,iev)+2.*comp_num_fit(1,2,1,1,1,0,icat,iev)+2.*comp_num_fit(1,2,1,1,1,1,icat,iev)
+2.*comp_num_fit(1,2,1,1,2,0,icat,iev)+2.*comp_num_fit(1,2,1,2,0,0,icat,iev)+2.*comp_num_fit(1,2,1,2,1,0,icat,iev)+2.*comp_num_fit(1,2,1,2,1,1,icat,iev)+2.*comp_num_fit(1,2,1,2,2,0,icat,iev)+2.*comp_num_fit(1,2,2,0,0,0,icat,iev)
+2.*comp_num_fit(1,2,2,0,1,0,icat,iev)+2.*comp_num_fit(1,2,2,0,2,0,icat,iev)+2.*comp_num_fit(1,2,2,1,0,0,icat,iev)+2.*comp_num_fit(1,2,2,1,1,0,icat,iev)+2.*comp_num_fit(1,2,2,1,1,1,icat,iev)+2.*comp_num_fit(1,2,2,1,1,2,icat,iev)
+2.*comp_num_fit(1,2,2,1,2,0,icat,iev)+2.*comp_num_fit(1,2,2,1,2,1,icat,iev)+2.*comp_num_fit(1,2,2,2,0,0,icat,iev)+2.*comp_num_fit(1,2,2,2,1,0,icat,iev)+2.*comp_num_fit(1,2,2,2,1,1,icat,iev)+2.*comp_num_fit(1,2,2,2,1,2,icat,iev)
+2.*comp_num_fit(1,2,2,2,2,0,icat,iev)+2.*comp_num_fit(1,2,2,2,2,1,icat,iev)+2.*comp_num_fit(2,0,0,0,0,0,icat,iev)+2.*comp_num_fit(2,0,0,1,0,0,icat,iev)+2.*comp_num_fit(2,1,0,0,0,0,icat,iev)+2.*comp_num_fit(2,1,0,0,1,0,icat,iev)
+2.*comp_num_fit(2,1,0,1,0,0,icat,iev)+2.*comp_num_fit(2,1,0,1,1,0,icat,iev)+2.*comp_num_fit(2,1,0,2,0,0,icat,iev)+2.*comp_num_fit(2,1,1,0,0,0,icat,iev)+2.*comp_num_fit(2,1,1,0,1,0,icat,iev)+2.*comp_num_fit(2,1,1,0,2,0,icat,iev)
+2.*comp_num_fit(2,1,1,1,0,0,icat,iev)+2.*comp_num_fit(2,1,1,1,1,0,icat,iev)+2.*comp_num_fit(2,1,1,1,1,1,icat,iev)+2.*comp_num_fit(2,1,1,1,2,0,icat,iev)+2.*comp_num_fit(2,1,1,2,0,0,icat,iev)+2.*comp_num_fit(2,1,1,2,1,0,icat,iev)
+2.*comp_num_fit(2,1,1,2,2,0,icat,iev)+2.*comp_num_fit(2,1,2,0,0,0,icat,iev)+2.*comp_num_fit(2,1,2,0,1,0,icat,iev)+2.*comp_num_fit(2,1,2,0,2,0,icat,iev)+2.*comp_num_fit(2,1,2,1,0,0,icat,iev)+2.*comp_num_fit(2,1,2,1,1,0,icat,iev)
+2.*comp_num_fit(2,1,2,1,1,1,icat,iev)+2.*comp_num_fit(2,1,2,1,1,2,icat,iev)+2.*comp_num_fit(2,1,2,1,2,0,icat,iev)+2.*comp_num_fit(2,1,2,1,2,1,icat,iev)+2.*comp_num_fit(2,1,2,2,0,0,icat,iev)+2.*comp_num_fit(2,1,2,2,1,0,icat,iev)
+2.*comp_num_fit(2,1,2,2,1,1,icat,iev)+2.*comp_num_fit(2,1,2,2,2,0,icat,iev)+2.*comp_num_fit(2,1,2,2,2,1,icat,iev)+2.*comp_num_fit(2,2,0,0,0,0,icat,iev)+2.*comp_num_fit(2,2,0,0,1,0,icat,iev)+2.*comp_num_fit(2,2,0,0,2,0,icat,iev)
+2.*comp_num_fit(2,2,0,1,0,0,icat,iev)+2.*comp_num_fit(2,2,0,1,1,0,icat,iev)+2.*comp_num_fit(2,2,0,1,2,0,icat,iev)+2.*comp_num_fit(2,2,0,2,0,0,icat,iev)+2.*comp_num_fit(2,2,0,2,1,0,icat,iev)+2.*comp_num_fit(2,2,1,0,0,0,icat,iev)
+2.*comp_num_fit(2,2,1,0,1,0,icat,iev)+2.*comp_num_fit(2,2,1,0,2,0,icat,iev)+2.*comp_num_fit(2,2,1,1,0,0,icat,iev)+2.*comp_num_fit(2,2,1,1,1,0,icat,iev)+2.*comp_num_fit(2,2,1,1,1,1,icat,iev)+2.*comp_num_fit(2,2,1,1,2,0,icat,iev)
+2.*comp_num_fit(2,2,1,1,2,1,icat,iev)+2.*comp_num_fit(2,2,1,2,0,0,icat,iev)+2.*comp_num_fit(2,2,1,2,1,0,icat,iev)+2.*comp_num_fit(2,2,1,2,1,1,icat,iev)+2.*comp_num_fit(2,2,1,2,2,0,icat,iev)+2.*comp_num_fit(2,2,2,0,0,0,icat,iev)
+2.*comp_num_fit(2,2,2,0,1,0,icat,iev)+2.*comp_num_fit(2,2,2,0,2,0,icat,iev)+2.*comp_num_fit(2,2,2,1,0,0,icat,iev)+2.*comp_num_fit(2,2,2,1,1,0,icat,iev)+2.*comp_num_fit(2,2,2,1,1,1,icat,iev)+2.*comp_num_fit(2,2,2,1,1,2,icat,iev)
+2.*comp_num_fit(2,2,2,1,2,0,icat,iev)+2.*comp_num_fit(2,2,2,1,2,1,icat,iev)+2.*comp_num_fit(2,2,2,1,2,2,icat,iev)+2.*comp_num_fit(2,2,2,2,0,0,icat,iev)+2.*comp_num_fit(2,2,2,2,1,0,icat,iev)+2.*comp_num_fit(2,2,2,2,1,1,icat,iev)
+2.*comp_num_fit(2,2,2,2,1,2,icat,iev)+2.*comp_num_fit(2,2,2,2,2,0,icat,iev)+2.*comp_num_fit(2,2,2,2,2,1,icat,iev)+2.*comp_num_fit(2,2,3,0,0,0,icat,iev)+2.*comp_num_fit(2,2,3,0,1,0,icat,iev)+2.*comp_num_fit(2,2,3,0,2,0,icat,iev)
+2.*comp_num_fit(2,2,3,1,0,0,icat,iev)+2.*comp_num_fit(2,2,3,1,1,0,icat,iev)+2.*comp_num_fit(2,2,3,1,1,1,icat,iev)+2.*comp_num_fit(2,2,3,1,1,2,icat,iev)+2.*comp_num_fit(2,2,3,1,2,0,icat,iev)+2.*comp_num_fit(2,2,3,1,2,1,icat,iev)
+2.*comp_num_fit(2,2,3,1,2,2,icat,iev)+2.*comp_num_fit(2,2,3,2,0,0,icat,iev)+2.*comp_num_fit(2,2,3,2,1,0,icat,iev)+2.*comp_num_fit(2,2,3,2,1,1,icat,iev)+2.*comp_num_fit(2,2,3,2,1,2,icat,iev)+2.*comp_num_fit(2,2,3,2,2,0,icat,iev)
+2.*comp_num_fit(2,2,3,2,2,1,icat,iev)+2.*comp_num_fit(2,2,3,2,2,2,icat,iev)+2.*comp_num_fit(2,2,4,0,0,0,icat,iev)+2.*comp_num_fit(2,2,4,0,1,0,icat,iev)+2.*comp_num_fit(2,2,4,0,2,0,icat,iev)+2.*comp_num_fit(2,2,4,1,0,0,icat,iev)
+2.*comp_num_fit(2,2,4,1,1,0,icat,iev)+2.*comp_num_fit(2,2,4,1,1,1,icat,iev)+2.*comp_num_fit(2,2,4,1,1,2,icat,iev)+2.*comp_num_fit(2,2,4,1,2,0,icat,iev)+2.*comp_num_fit(2,2,4,1,2,1,icat,iev)+2.*comp_num_fit(2,2,4,1,2,2,icat,iev)
+2.*comp_num_fit(2,2,4,2,0,0,icat,iev)+2.*comp_num_fit(2,2,4,2,1,0,icat,iev)+2.*comp_num_fit(2,2,4,2,1,1,icat,iev)+2.*comp_num_fit(2,2,4,2,1,2,icat,iev)+2.*comp_num_fit(2,2,4,2,2,0,icat,iev)+2.*comp_num_fit(2,2,4,2,2,1,icat,iev)
+2.*comp_num_fit(2,2,4,2,2,2,icat,iev)+2.*comp_num_fit(2,2,4,2,2,3,icat,iev);
}
__device__ double den_fit(int icat, int iev) {
return comp_den_fit(0,0,0,0,0,0,icat,iev)+comp_den_fit(0,1,0,0,1,0,icat,iev)+comp_den_fit(0,2,0,0,2,0,icat,iev)+comp_den_fit(1,0,0,1,0,0,icat,iev)+comp_den_fit(1,1,0,1,1,0,icat,iev)+comp_den_fit(1,1,1,1,1,1,icat,iev)
+comp_den_fit(1,1,2,1,1,2,icat,iev)+comp_den_fit(1,2,0,1,2,0,icat,iev)+comp_den_fit(1,2,1,1,2,1,icat,iev)+comp_den_fit(1,2,2,1,2,2,icat,iev)+comp_den_fit(2,0,0,2,0,0,icat,iev)+comp_den_fit(2,1,0,2,1,0,icat,iev)+comp_den_fit(2,1,1,2,1,1,icat,iev)
+comp_den_fit(2,1,2,2,1,2,icat,iev)+comp_den_fit(2,2,0,2,2,0,icat,iev)+comp_den_fit(2,2,1,2,2,1,icat,iev)+comp_den_fit(2,2,2,2,2,2,icat,iev)+comp_den_fit(2,2,3,2,2,3,icat,iev)+comp_den_fit(2,2,4,2,2,4,icat,iev)+2.*comp_den_fit(0,1,0,0,0,0,icat,iev)
+2.*comp_den_fit(0,1,0,1,0,0,icat,iev)+2.*comp_den_fit(0,1,0,2,0,0,icat,iev)+2.*comp_den_fit(0,2,0,0,0,0,icat,iev)+2.*comp_den_fit(0,2,0,0,1,0,icat,iev)+2.*comp_den_fit(0,2,0,1,0,0,icat,iev)+2.*comp_den_fit(0,2,0,1,1,0,icat,iev)
+2.*comp_den_fit(0,2,0,2,0,0,icat,iev)+2.*comp_den_fit(0,2,0,2,1,0,icat,iev)+2.*comp_den_fit(1,0,0,0,0,0,icat,iev)+2.*comp_den_fit(1,1,0,0,0,0,icat,iev)+2.*comp_den_fit(1,1,0,0,1,0,icat,iev)+2.*comp_den_fit(1,1,0,1,0,0,icat,iev)
+2.*comp_den_fit(1,1,0,2,0,0,icat,iev)+2.*comp_den_fit(1,1,1,0,0,0,icat,iev)+2.*comp_den_fit(1,1,1,0,1,0,icat,iev)+2.*comp_den_fit(1,1,1,0,2,0,icat,iev)+2.*comp_den_fit(1,1,1,1,0,0,icat,iev)+2.*comp_den_fit(1,1,1,1,1,0,icat,iev)
+2.*comp_den_fit(1,1,1,1,2,0,icat,iev)+2.*comp_den_fit(1,1,1,2,0,0,icat,iev)+2.*comp_den_fit(1,1,1,2,1,0,icat,iev)+2.*comp_den_fit(1,1,1,2,2,0,icat,iev)+2.*comp_den_fit(1,1,2,0,0,0,icat,iev)+2.*comp_den_fit(1,1,2,0,1,0,icat,iev)
+2.*comp_den_fit(1,1,2,0,2,0,icat,iev)+2.*comp_den_fit(1,1,2,1,0,0,icat,iev)+2.*comp_den_fit(1,1,2,1,1,0,icat,iev)+2.*comp_den_fit(1,1,2,1,1,1,icat,iev)+2.*comp_den_fit(1,1,2,1,2,0,icat,iev)+2.*comp_den_fit(1,1,2,1,2,1,icat,iev)
+2.*comp_den_fit(1,1,2,2,0,0,icat,iev)+2.*comp_den_fit(1,1,2,2,1,0,icat,iev)+2.*comp_den_fit(1,1,2,2,1,1,icat,iev)+2.*comp_den_fit(1,1,2,2,2,0,icat,iev)+2.*comp_den_fit(1,1,2,2,2,1,icat,iev)+2.*comp_den_fit(1,2,0,0,0,0,icat,iev)
+2.*comp_den_fit(1,2,0,0,1,0,icat,iev)+2.*comp_den_fit(1,2,0,0,2,0,icat,iev)+2.*comp_den_fit(1,2,0,1,0,0,icat,iev)+2.*comp_den_fit(1,2,0,1,1,0,icat,iev)+2.*comp_den_fit(1,2,0,2,0,0,icat,iev)+2.*comp_den_fit(1,2,0,2,1,0,icat,iev)
+2.*comp_den_fit(1,2,1,0,0,0,icat,iev)+2.*comp_den_fit(1,2,1,0,1,0,icat,iev)+2.*comp_den_fit(1,2,1,0,2,0,icat,iev)+2.*comp_den_fit(1,2,1,1,0,0,icat,iev)+2.*comp_den_fit(1,2,1,1,1,0,icat,iev)+2.*comp_den_fit(1,2,1,1,1,1,icat,iev)
+2.*comp_den_fit(1,2,1,1,2,0,icat,iev)+2.*comp_den_fit(1,2,1,2,0,0,icat,iev)+2.*comp_den_fit(1,2,1,2,1,0,icat,iev)+2.*comp_den_fit(1,2,1,2,1,1,icat,iev)+2.*comp_den_fit(1,2,1,2,2,0,icat,iev)+2.*comp_den_fit(1,2,2,0,0,0,icat,iev)
+2.*comp_den_fit(1,2,2,0,1,0,icat,iev)+2.*comp_den_fit(1,2,2,0,2,0,icat,iev)+2.*comp_den_fit(1,2,2,1,0,0,icat,iev)+2.*comp_den_fit(1,2,2,1,1,0,icat,iev)+2.*comp_den_fit(1,2,2,1,1,1,icat,iev)+2.*comp_den_fit(1,2,2,1,1,2,icat,iev)
+2.*comp_den_fit(1,2,2,1,2,0,icat,iev)+2.*comp_den_fit(1,2,2,1,2,1,icat,iev)+2.*comp_den_fit(1,2,2,2,0,0,icat,iev)+2.*comp_den_fit(1,2,2,2,1,0,icat,iev)+2.*comp_den_fit(1,2,2,2,1,1,icat,iev)+2.*comp_den_fit(1,2,2,2,1,2,icat,iev)
+2.*comp_den_fit(1,2,2,2,2,0,icat,iev)+2.*comp_den_fit(1,2,2,2,2,1,icat,iev)+2.*comp_den_fit(2,0,0,0,0,0,icat,iev)+2.*comp_den_fit(2,0,0,1,0,0,icat,iev)+2.*comp_den_fit(2,1,0,0,0,0,icat,iev)+2.*comp_den_fit(2,1,0,0,1,0,icat,iev)
+2.*comp_den_fit(2,1,0,1,0,0,icat,iev)+2.*comp_den_fit(2,1,0,1,1,0,icat,iev)+2.*comp_den_fit(2,1,0,2,0,0,icat,iev)+2.*comp_den_fit(2,1,1,0,0,0,icat,iev)+2.*comp_den_fit(2,1,1,0,1,0,icat,iev)+2.*comp_den_fit(2,1,1,0,2,0,icat,iev)
+2.*comp_den_fit(2,1,1,1,0,0,icat,iev)+2.*comp_den_fit(2,1,1,1,1,0,icat,iev)+2.*comp_den_fit(2,1,1,1,1,1,icat,iev)+2.*comp_den_fit(2,1,1,1,2,0,icat,iev)+2.*comp_den_fit(2,1,1,2,0,0,icat,iev)+2.*comp_den_fit(2,1,1,2,1,0,icat,iev)
+2.*comp_den_fit(2,1,1,2,2,0,icat,iev)+2.*comp_den_fit(2,1,2,0,0,0,icat,iev)+2.*comp_den_fit(2,1,2,0,1,0,icat,iev)+2.*comp_den_fit(2,1,2,0,2,0,icat,iev)+2.*comp_den_fit(2,1,2,1,0,0,icat,iev)+2.*comp_den_fit(2,1,2,1,1,0,icat,iev)
+2.*comp_den_fit(2,1,2,1,1,1,icat,iev)+2.*comp_den_fit(2,1,2,1,1,2,icat,iev)+2.*comp_den_fit(2,1,2,1,2,0,icat,iev)+2.*comp_den_fit(2,1,2,1,2,1,icat,iev)+2.*comp_den_fit(2,1,2,2,0,0,icat,iev)+2.*comp_den_fit(2,1,2,2,1,0,icat,iev)
+2.*comp_den_fit(2,1,2,2,1,1,icat,iev)+2.*comp_den_fit(2,1,2,2,2,0,icat,iev)+2.*comp_den_fit(2,1,2,2,2,1,icat,iev)+2.*comp_den_fit(2,2,0,0,0,0,icat,iev)+2.*comp_den_fit(2,2,0,0,1,0,icat,iev)+2.*comp_den_fit(2,2,0,0,2,0,icat,iev)
+2.*comp_den_fit(2,2,0,1,0,0,icat,iev)+2.*comp_den_fit(2,2,0,1,1,0,icat,iev)+2.*comp_den_fit(2,2,0,1,2,0,icat,iev)+2.*comp_den_fit(2,2,0,2,0,0,icat,iev)+2.*comp_den_fit(2,2,0,2,1,0,icat,iev)+2.*comp_den_fit(2,2,1,0,0,0,icat,iev)
+2.*comp_den_fit(2,2,1,0,1,0,icat,iev)+2.*comp_den_fit(2,2,1,0,2,0,icat,iev)+2.*comp_den_fit(2,2,1,1,0,0,icat,iev)+2.*comp_den_fit(2,2,1,1,1,0,icat,iev)+2.*comp_den_fit(2,2,1,1,1,1,icat,iev)+2.*comp_den_fit(2,2,1,1,2,0,icat,iev)
+2.*comp_den_fit(2,2,1,1,2,1,icat,iev)+2.*comp_den_fit(2,2,1,2,0,0,icat,iev)+2.*comp_den_fit(2,2,1,2,1,0,icat,iev)+2.*comp_den_fit(2,2,1,2,1,1,icat,iev)+2.*comp_den_fit(2,2,1,2,2,0,icat,iev)+2.*comp_den_fit(2,2,2,0,0,0,icat,iev)
+2.*comp_den_fit(2,2,2,0,1,0,icat,iev)+2.*comp_den_fit(2,2,2,0,2,0,icat,iev)+2.*comp_den_fit(2,2,2,1,0,0,icat,iev)+2.*comp_den_fit(2,2,2,1,1,0,icat,iev)+2.*comp_den_fit(2,2,2,1,1,1,icat,iev)+2.*comp_den_fit(2,2,2,1,1,2,icat,iev)
+2.*comp_den_fit(2,2,2,1,2,0,icat,iev)+2.*comp_den_fit(2,2,2,1,2,1,icat,iev)+2.*comp_den_fit(2,2,2,1,2,2,icat,iev)+2.*comp_den_fit(2,2,2,2,0,0,icat,iev)+2.*comp_den_fit(2,2,2,2,1,0,icat,iev)+2.*comp_den_fit(2,2,2,2,1,1,icat,iev)
+2.*comp_den_fit(2,2,2,2,1,2,icat,iev)+2.*comp_den_fit(2,2,2,2,2,0,icat,iev)+2.*comp_den_fit(2,2,2,2,2,1,icat,iev)+2.*comp_den_fit(2,2,3,0,0,0,icat,iev)+2.*comp_den_fit(2,2,3,0,1,0,icat,iev)+2.*comp_den_fit(2,2,3,0,2,0,icat,iev)
+2.*comp_den_fit(2,2,3,1,0,0,icat,iev)+2.*comp_den_fit(2,2,3,1,1,0,icat,iev)+2.*comp_den_fit(2,2,3,1,1,1,icat,iev)+2.*comp_den_fit(2,2,3,1,1,2,icat,iev)+2.*comp_den_fit(2,2,3,1,2,0,icat,iev)+2.*comp_den_fit(2,2,3,1,2,1,icat,iev)
+2.*comp_den_fit(2,2,3,1,2,2,icat,iev)+2.*comp_den_fit(2,2,3,2,0,0,icat,iev)+2.*comp_den_fit(2,2,3,2,1,0,icat,iev)+2.*comp_den_fit(2,2,3,2,1,1,icat,iev)+2.*comp_den_fit(2,2,3,2,1,2,icat,iev)+2.*comp_den_fit(2,2,3,2,2,0,icat,iev)
+2.*comp_den_fit(2,2,3,2,2,1,icat,iev)+2.*comp_den_fit(2,2,3,2,2,2,icat,iev)+2.*comp_den_fit(2,2,4,0,0,0,icat,iev)+2.*comp_den_fit(2,2,4,0,1,0,icat,iev)+2.*comp_den_fit(2,2,4,0,2,0,icat,iev)+2.*comp_den_fit(2,2,4,1,0,0,icat,iev)
+2.*comp_den_fit(2,2,4,1,1,0,icat,iev)+2.*comp_den_fit(2,2,4,1,1,1,icat,iev)+2.*comp_den_fit(2,2,4,1,1,2,icat,iev)+2.*comp_den_fit(2,2,4,1,2,0,icat,iev)+2.*comp_den_fit(2,2,4,1,2,1,icat,iev)+2.*comp_den_fit(2,2,4,1,2,2,icat,iev)
+2.*comp_den_fit(2,2,4,2,0,0,icat,iev)+2.*comp_den_fit(2,2,4,2,1,0,icat,iev)+2.*comp_den_fit(2,2,4,2,1,1,icat,iev)+2.*comp_den_fit(2,2,4,2,1,2,icat,iev)+2.*comp_den_fit(2,2,4,2,2,0,icat,iev)+2.*comp_den_fit(2,2,4,2,2,1,icat,iev)
+2.*comp_den_fit(2,2,4,2,2,2,icat,iev)+2.*comp_den_fit(2,2,4,2,2,3,icat,iev);
}
// ##########################################
// PDF evaluator
__device__ void set_buffer_options(double *options, int icat) {
wide_window = (int) options[0];
year_opt[icat] = (int) options[1];
trig_opt[icat] = (int) options[2];
alt_fit = options[3];
option = options[4];
inftres = options[5];
acctype = options[6];
A_j1 = options[7];
A_j2 = options[8];
A_h = options[9];
A_j1p = options[10];
A_j2p = options[11];
A_hp = options[12];
qcomp = options[13];
}
__device__ void set_buffer_amplitudes(double *re_amps, double *dirCP_asyms, double *im_amps, double *weak_phases, double *mixing_params, double *calib_params) {
reA00 = re_amps[0];
reA01 = re_amps[1];
reA10 = re_amps[2];
reA02 = re_amps[3];
reA20 = re_amps[4];
reA110 = re_amps[5];
reA11par = re_amps[6];
reA11perp = re_amps[7];
reA120 = re_amps[8];
reA12par = re_amps[9];
reA12perp = re_amps[10];
reA210 = re_amps[11];
reA21par = re_amps[12];
reA21perp = re_amps[13];
reA220 = re_amps[14];
reA22par = re_amps[15];
reA22perp = re_amps[16];
reA22par2 = re_amps[17];
reA22perp2 = re_amps[18];
DCP_SS = dirCP_asyms[0];
DCP_SV = dirCP_asyms[1];
DCP_VS = dirCP_asyms[2];
DCP_ST = dirCP_asyms[3];
DCP_TS = dirCP_asyms[4];
DCP = dirCP_asyms[5];
DCP_VT = dirCP_asyms[6];
DCP_TV = dirCP_asyms[7];
DCP_TT = dirCP_asyms[8];
imA00 = im_amps[0];
imA01 = im_amps[1];
imA10 = im_amps[2];
imA02 = im_amps[3];
imA20 = im_amps[4];
imA110 = im_amps[5];
imA11par = im_amps[6];
imA11perp = im_amps[7];
imA120 = im_amps[8];
imA12par = im_amps[9];
imA12perp = im_amps[10];
imA210 = im_amps[11];
imA21par = im_amps[12];
imA21perp = im_amps[13];
imA220 = im_amps[14];
imA22par = im_amps[15];
imA22perp = im_amps[16];
imA22par2 = im_amps[17];
imA22perp2 = im_amps[18];
phis = weak_phases[0];
dphi_SS = weak_phases[1];
dphi_SV = weak_phases[2];
dphi_VS = weak_phases[3];
dphi_ST = weak_phases[4];
dphi_TS = weak_phases[5];
dphi_VT = weak_phases[6];
dphi_TV = weak_phases[7];
dphi_TT = weak_phases[8];
delta_m_freq = mixing_params[0];
gamma_Bs_freq = mixing_params[1];
delta_gamma_freq = mixing_params[2];
p0metac_tag_SSK = calib_params[0];
p0metac_tag_OS = calib_params[1];
Dp0half_tag_SSK = calib_params[2];
Dp0half_tag_OS = calib_params[3];
p1_tag_SSK = calib_params[4];
p1_tag_OS = calib_params[5];
Dp1half_tag_SSK = calib_params[6];
Dp1half_tag_OS = calib_params[7];
etac_tag_SSK = calib_params[8];
etac_tag_OS = calib_params[9];
deltatmean_tres_11 = calib_params[10];
p0_tres_11 = calib_params[11];
p1_tres_11 = calib_params[12];
deltatmean_tres_12 = calib_params[13];
p0_tres_12 = calib_params[14];
p1_tres_12 = calib_params[15];
mv = calib_params[16];
ms = calib_params[17];
mt = calib_params[18];
gv = calib_params[19];
gs = calib_params[20];
gt = calib_params[21];
c1_mass_swave = calib_params[22];
c2_mass_swave = calib_params[23];
c3_mass_swave = calib_params[24];
c4_mass_swave = calib_params[25];
c5_mass_swave = calib_params[26];
c6_mass_swave = calib_params[27];
c7_mass_swave = calib_params[28];
c8_mass_swave = calib_params[29];
c9_mass_swave = calib_params[30];
res_mass = calib_params[31];
if (wide_window == 0) {DCP_prod = -0.0101;}
else {DCP_prod = -0.0072;}
reAj1j2h_temp[0][0][0] = pycuda::real(Aj1j2h(0,0,0));
reAj1j2h_temp[0][1][0] = pycuda::real(Aj1j2h(0,1,0));
reAj1j2h_temp[1][0][0] = pycuda::real(Aj1j2h(1,0,0));
reAj1j2h_temp[0][2][0] = pycuda::real(Aj1j2h(0,2,0));
reAj1j2h_temp[2][0][0] = pycuda::real(Aj1j2h(2,0,0));
reAj1j2h_temp[1][1][0] = pycuda::real(Aj1j2h(1,1,0));
reAj1j2h_temp[1][1][1] = pycuda::real(Aj1j2h(1,1,1));
reAj1j2h_temp[1][1][2] = pycuda::real(Aj1j2h(1,1,2));
reAj1j2h_temp[1][2][0] = pycuda::real(Aj1j2h(1,2,0));
reAj1j2h_temp[1][2][1] = pycuda::real(Aj1j2h(1,2,1));
reAj1j2h_temp[1][2][2] = pycuda::real(Aj1j2h(1,2,2));
reAj1j2h_temp[2][1][0] = pycuda::real(Aj1j2h(2,1,0));
reAj1j2h_temp[2][1][1] = pycuda::real(Aj1j2h(2,1,1));
reAj1j2h_temp[2][1][2] = pycuda::real(Aj1j2h(2,1,2));
reAj1j2h_temp[2][2][0] = pycuda::real(Aj1j2h(2,2,0));
reAj1j2h_temp[2][2][1] = pycuda::real(Aj1j2h(2,2,1));
reAj1j2h_temp[2][2][2] = pycuda::real(Aj1j2h(2,2,2));
reAj1j2h_temp[2][2][3] = pycuda::real(Aj1j2h(2,2,3));
reAj1j2h_temp[2][2][4] = pycuda::real(Aj1j2h(2,2,4));
imAj1j2h_temp[0][0][0] = pycuda::imag(Aj1j2h(0,0,0));
imAj1j2h_temp[0][1][0] = pycuda::imag(Aj1j2h(0,1,0));
imAj1j2h_temp[1][0][0] = pycuda::imag(Aj1j2h(1,0,0));
imAj1j2h_temp[0][2][0] = pycuda::imag(Aj1j2h(0,2,0));
imAj1j2h_temp[2][0][0] = pycuda::imag(Aj1j2h(2,0,0));
imAj1j2h_temp[1][1][0] = pycuda::imag(Aj1j2h(1,1,0));
imAj1j2h_temp[1][1][1] = pycuda::imag(Aj1j2h(1,1,1));
imAj1j2h_temp[1][1][2] = pycuda::imag(Aj1j2h(1,1,2));
imAj1j2h_temp[1][2][0] = pycuda::imag(Aj1j2h(1,2,0));
imAj1j2h_temp[1][2][1] = pycuda::imag(Aj1j2h(1,2,1));
imAj1j2h_temp[1][2][2] = pycuda::imag(Aj1j2h(1,2,2));
imAj1j2h_temp[2][1][0] = pycuda::imag(Aj1j2h(2,1,0));
imAj1j2h_temp[2][1][1] = pycuda::imag(Aj1j2h(2,1,1));
imAj1j2h_temp[2][1][2] = pycuda::imag(Aj1j2h(2,1,2));
imAj1j2h_temp[2][2][0] = pycuda::imag(Aj1j2h(2,2,0));
imAj1j2h_temp[2][2][1] = pycuda::imag(Aj1j2h(2,2,1));
imAj1j2h_temp[2][2][2] = pycuda::imag(Aj1j2h(2,2,2));
imAj1j2h_temp[2][2][3] = pycuda::imag(Aj1j2h(2,2,3));
imAj1j2h_temp[2][2][4] = pycuda::imag(Aj1j2h(2,2,4));
reAbarj1j2h_temp[0][0][0] = pycuda::real(Abarj1j2h(0,0,0));
reAbarj1j2h_temp[0][1][0] = pycuda::real(Abarj1j2h(0,1,0));
reAbarj1j2h_temp[1][0][0] = pycuda::real(Abarj1j2h(1,0,0));
reAbarj1j2h_temp[0][2][0] = pycuda::real(Abarj1j2h(0,2,0));
reAbarj1j2h_temp[2][0][0] = pycuda::real(Abarj1j2h(2,0,0));
reAbarj1j2h_temp[1][1][0] = pycuda::real(Abarj1j2h(1,1,0));
reAbarj1j2h_temp[1][1][1] = pycuda::real(Abarj1j2h(1,1,1));
reAbarj1j2h_temp[1][1][2] = pycuda::real(Abarj1j2h(1,1,2));
reAbarj1j2h_temp[1][2][0] = pycuda::real(Abarj1j2h(1,2,0));
reAbarj1j2h_temp[1][2][1] = pycuda::real(Abarj1j2h(1,2,1));
reAbarj1j2h_temp[1][2][2] = pycuda::real(Abarj1j2h(1,2,2));
reAbarj1j2h_temp[2][1][0] = pycuda::real(Abarj1j2h(2,1,0));
reAbarj1j2h_temp[2][1][1] = pycuda::real(Abarj1j2h(2,1,1));
reAbarj1j2h_temp[2][1][2] = pycuda::real(Abarj1j2h(2,1,2));
reAbarj1j2h_temp[2][2][0] = pycuda::real(Abarj1j2h(2,2,0));
reAbarj1j2h_temp[2][2][1] = pycuda::real(Abarj1j2h(2,2,1));
reAbarj1j2h_temp[2][2][2] = pycuda::real(Abarj1j2h(2,2,2));
reAbarj1j2h_temp[2][2][3] = pycuda::real(Abarj1j2h(2,2,3));
reAbarj1j2h_temp[2][2][4] = pycuda::real(Abarj1j2h(2,2,4));
imAbarj1j2h_temp[0][0][0] = pycuda::imag(Abarj1j2h(0,0,0));
imAbarj1j2h_temp[0][1][0] = pycuda::imag(Abarj1j2h(0,1,0));
imAbarj1j2h_temp[1][0][0] = pycuda::imag(Abarj1j2h(1,0,0));
imAbarj1j2h_temp[0][2][0] = pycuda::imag(Abarj1j2h(0,2,0));
imAbarj1j2h_temp[2][0][0] = pycuda::imag(Abarj1j2h(2,0,0));
imAbarj1j2h_temp[1][1][0] = pycuda::imag(Abarj1j2h(1,1,0));
imAbarj1j2h_temp[1][1][1] = pycuda::imag(Abarj1j2h(1,1,1));
imAbarj1j2h_temp[1][1][2] = pycuda::imag(Abarj1j2h(1,1,2));
imAbarj1j2h_temp[1][2][0] = pycuda::imag(Abarj1j2h(1,2,0));
imAbarj1j2h_temp[1][2][1] = pycuda::imag(Abarj1j2h(1,2,1));
imAbarj1j2h_temp[1][2][2] = pycuda::imag(Abarj1j2h(1,2,2));
imAbarj1j2h_temp[2][1][0] = pycuda::imag(Abarj1j2h(2,1,0));
imAbarj1j2h_temp[2][1][1] = pycuda::imag(Abarj1j2h(2,1,1));
imAbarj1j2h_temp[2][1][2] = pycuda::imag(Abarj1j2h(2,1,2));
imAbarj1j2h_temp[2][2][0] = pycuda::imag(Abarj1j2h(2,2,0));
imAbarj1j2h_temp[2][2][1] = pycuda::imag(Abarj1j2h(2,2,1));
imAbarj1j2h_temp[2][2][2] = pycuda::imag(Abarj1j2h(2,2,2));
imAbarj1j2h_temp[2][2][3] = pycuda::imag(Abarj1j2h(2,2,3));
imAbarj1j2h_temp[2][2][4] = pycuda::imag(Abarj1j2h(2,2,4));
}
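// Per-event quantities: decay-time resolution scale, convolved time functions, tagging
// dilution factors, angular basis values and mass propagators, cached in the *_temp buffers.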
__device__ void set_buffer_differential_terms(double *mass_integrals, int icat, int iev) {
Im00 = mass_integrals[0];
Im01 = mass_integrals[1];
Im10 = mass_integrals[2];
Im02 = mass_integrals[3];
Im20 = mass_integrals[4];
Im11 = mass_integrals[5];
Im12 = mass_integrals[6];
Im21 = mass_integrals[7];
Im22 = mass_integrals[8];
double f1,f2,s1,s2,x1,x2;
if (acctype == 3) {
f1 = 1.;
f2 = 0.;
s1 = p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12);
//s1 = (p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12))*1.1779041429731925;
//s1 = (p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12))*t_err[icat][iev];
s2 = 1.;
x1 = t[icat][iev]/(sqrt(2.)*s1);
x2 = t[icat][iev]/(sqrt(2.)*s2);
}
else {
f1 = 1.;
f2 = 0.;
  if (year_opt[icat] == 0) {s1 = p0_tres_11+p1_tres_11*(t_err[icat][iev]-deltatmean_tres_11);}
else {s1 = p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12);}
//if (year_opt == 0) {s1 = (p0_tres_11+p1_tres_11*(t_err[icat][iev]-deltatmean_tres_11))*1.1779041429731925;}
//else {s1 = (p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12))*1.1779041429731925;}
//if (year_opt == 0) {s1 = (p0_tres_11+p1_tres_11*(t_err[icat][iev]-deltatmean_tres_11))*t_err[icat][iev];}
//else {s1 = (p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12))*t_err[icat][iev];}
s2 = 1.;
x1 = t[icat][iev]/(sqrt(2.)*s1);
x2 = t[icat][iev]/(sqrt(2.)*s2);
}
pycuda::complex<double> z1_hyper_plus = s1/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq-0.5*delta_gamma_freq,0.);
pycuda::complex<double> z2_hyper_plus = s2/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq-0.5*delta_gamma_freq,0.);
pycuda::complex<double> z1_hyper_minus = s1/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq+0.5*delta_gamma_freq,0.);
pycuda::complex<double> z2_hyper_minus = s2/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq+0.5*delta_gamma_freq,0.);
pycuda::complex<double> z1_trigo = s1/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq,-delta_m_freq);
pycuda::complex<double> z2_trigo = s2/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq,-delta_m_freq);
double conv_exp_hyper_plus = pycuda::real(f1*conv_exp(x1,z1_hyper_plus)+f2*conv_exp(x2,z2_hyper_plus));
double conv_exp_hyper_minus = pycuda::real(f1*conv_exp(x1,z1_hyper_minus)+f2*conv_exp(x2,z2_hyper_minus));
pycuda::complex<double> conv_exp_trigo = f1*conv_exp(x1,z1_trigo)+f2*conv_exp(x2,z2_trigo);
T_cosh_temp[icat][iev] = 0.5*(conv_exp_hyper_plus + conv_exp_hyper_minus);
T_sinh_temp[icat][iev] = 0.5*(conv_exp_hyper_plus - conv_exp_hyper_minus);
T_cos_temp[icat][iev] = pycuda::real(conv_exp_trigo);
T_sin_temp[icat][iev] = pycuda::imag(conv_exp_trigo);
zeta_temp[icat][iev] = zeta(decision_SSK[icat][iev],decision_OS[icat][iev],etamistag_SSK[icat][iev],etamistag_OS[icat][iev]);
DCP_tzero_temp[icat][iev] = DCP_tzero(decision_SSK[icat][iev],decision_OS[icat][iev],etamistag_SSK[icat][iev],etamistag_OS[icat][iev]);
for (int i=0; i<18; ++i) {fi_cos1_temp[i][icat][iev] = fi(cos1[icat][iev],i+1);}
for (int i=0; i<18; ++i) {fi_cos2_temp[i][icat][iev] = fi(cos2[icat][iev],i+1);}
for (int i=0; i<15; ++i) {gi_temp[i][icat][iev] = gi(phi[icat][iev],i+1);}
for (int j1=0; j1<3; ++j1) {
for (int j2=0; j2<3; ++j2) {
pycuda::complex<double> M_temp = Mj1j2(m1[icat][iev],m2[icat][iev],j1,j2);
reMj1j2_temp[j1][j2][icat][iev] = pycuda::real(M_temp);
imMj1j2_temp[j1][j2][icat][iev] = pycuda::imag(M_temp);
}
}
phasespace_temp[icat][iev] = phasespace(m1[icat][iev],m2[icat][iev]);
}
__device__ double Factorial(int n) {
if(n <= 0) return 1.;
double x = 1;
int b = 0;
do {
b++;
x *= b;
} while(b!=n);
return x;
}
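// Normalisation of the time-dependent terms for one event: the convolved exponentials are
// integrated either over a flat acceptance on [0, 12] (acctype 0) or against the cubic
// acceptance spline between the knots, using the Kn/Mn integrals above.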
__device__ void set_buffer_integral_terms(int icat, int iev) {
double s1_deltat;
if (acctype == 3) {
s1_deltat = p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12);
//s1_deltat = (p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12))*1.1779041429731925;
//s1_deltat = (p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12))*t_err[icat][iev];
for (int i=0; i<6; ++i) {spl_knot_vector[year_opt[icat]][trig_opt[icat]][i] = knot_gen(wide_window,i);}
}
else {
  if (year_opt[icat] == 0) {s1_deltat = p0_tres_11+p1_tres_11*(t_err[icat][iev]-deltatmean_tres_11);}
else {s1_deltat = p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12);}
//if (year_opt == 0) {s1_deltat = (p0_tres_11+p1_tres_11*(t_err[icat][iev]-deltatmean_tres_11))*1.1779041429731925;}
//else {s1_deltat = (p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12))*1.1779041429731925;}
//if (year_opt == 0) {s1_deltat = (p0_tres_11+p1_tres_11*(t_err[icat][iev]-deltatmean_tres_11))*t_err[icat][iev];}
//else {s1_deltat = (p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12))*t_err[icat][iev];}
for (int i=0; i<6; ++i) {spl_knot_vector[year_opt[icat]][trig_opt[icat]][i] = spline_knot(wide_window,i);}
}
pycuda::complex<double> z1_hyper_plus_deltat = s1_deltat/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq-0.5*delta_gamma_freq,0.);
pycuda::complex<double> z1_hyper_minus_deltat = s1_deltat/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq+0.5*delta_gamma_freq,0.);
pycuda::complex<double> z1_trigo_deltat = s1_deltat/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq,-delta_m_freq);
double spl_knot_x1_vector_deltat[6];
double spl_coef_array_deltat[5][4][4][4];
if (acctype == 1 or acctype == 2) {
spl_knot_x1_vector_deltat[0] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][0]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[1] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][1]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[2] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][2]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[3] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][3]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[4] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][4]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[5] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][5]/(sqrt(2.)*s1_deltat);
for (int ibin=0; ibin<5; ++ibin) {
for (int k=0; k<4; ++k) {
for (int i=0; i<(k+1); ++i) {
for (int j=0; j<(i+1); ++j) {
spl_coef_array_deltat[ibin][k][i][j] = spline_coef(year_opt[icat],trig_opt[icat],wide_window,ibin,k)*Factorial(k)/Factorial(k-i)/Factorial(i-j)/Factorial(j)*pow(s1_deltat/sqrt(2.),i+1)*pow(0.,k-i);
}
}
}
}
}
else if (acctype == 3) {
spl_knot_x1_vector_deltat[0] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][0]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[1] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][1]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[2] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][2]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[3] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][3]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[4] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][4]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[5] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][5]/(sqrt(2.)*s1_deltat);
for (int ibin=0; ibin<5; ++ibin) {
for (int k=0; k<4; ++k) {
for (int i=0; i<(k+1); ++i) {
for (int j=0; j<(i+1); ++j) {
spl_coef_array_deltat[ibin][k][i][j] = coef_gen(wide_window,ibin,k)*Factorial(k)/Factorial(k-i)/Factorial(i-j)/Factorial(j)*pow(s1_deltat/sqrt(2.),i+1)*pow(0.,k-i);
}
}
}
}
}
double integral_conv_exp_hyper_plus_deltat = 0;
double integral_conv_exp_hyper_minus_deltat = 0;
pycuda::complex<double> integral_conv_exp_trigo_deltat = pycuda::complex<double>(0.,0.);
if (acctype == 0) {
integral_conv_exp_hyper_plus_deltat += pycuda::real(s1_deltat/sqrt(2.)*Mn(0.,12./(sqrt(2.)*s1_deltat),z1_hyper_plus_deltat,0)*Kn(z1_hyper_plus_deltat,0));
integral_conv_exp_hyper_minus_deltat += pycuda::real(s1_deltat/sqrt(2.)*Mn(0.,12./(sqrt(2.)*s1_deltat),z1_hyper_minus_deltat,0)*Kn(z1_hyper_minus_deltat,0));
integral_conv_exp_trigo_deltat += s1_deltat/sqrt(2.)*Mn(0.,12./(sqrt(2.)*s1_deltat),z1_trigo_deltat,0)*Kn(z1_trigo_deltat,0);
}
else {
for (int ibin=0; ibin<5; ++ibin) {
for (int k=0; k<4; ++k) {
for (int i=0; i<(k+1); ++i) {
for (int j=0; j<(i+1); ++j) {
integral_conv_exp_hyper_plus_deltat += pycuda::real(spl_coef_array_deltat[ibin][k][i][j]*Mn(spl_knot_x1_vector_deltat[ibin],spl_knot_x1_vector_deltat[ibin+1],z1_hyper_plus_deltat,i-j)*Kn(z1_hyper_plus_deltat,j));
integral_conv_exp_hyper_minus_deltat += pycuda::real(spl_coef_array_deltat[ibin][k][i][j]*Mn(spl_knot_x1_vector_deltat[ibin],spl_knot_x1_vector_deltat[ibin+1],z1_hyper_minus_deltat,i-j)*Kn(z1_hyper_minus_deltat,j));
integral_conv_exp_trigo_deltat += spl_coef_array_deltat[ibin][k][i][j]*Mn(spl_knot_x1_vector_deltat[ibin],spl_knot_x1_vector_deltat[ibin+1],z1_trigo_deltat,i-j)*Kn(z1_trigo_deltat,j);
}
}
}
}
}
IT_cosh_temp_deltat[icat][iev] = 0.5*(integral_conv_exp_hyper_plus_deltat + integral_conv_exp_hyper_minus_deltat);
IT_sinh_temp_deltat[icat][iev] = 0.5*(integral_conv_exp_hyper_plus_deltat - integral_conv_exp_hyper_minus_deltat);
IT_cos_temp_deltat[icat][iev] = pycuda::real(integral_conv_exp_trigo_deltat);
IT_sin_temp_deltat[icat][iev] = pycuda::imag(integral_conv_exp_trigo_deltat);
}
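// Main fit kernel: one thread per event. It unpacks the event tuple, fills the per-event
// buffers and stores the weighted log of the normalised PDF, or a large negative value if
// the S-wave modulus or the PDF ratio is non-positive.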
__global__ void evaluate(double *data, double *out, double *check, double *options, double *re_amps, double *dirCP_asyms, double *im_amps, double *weak_phases, double *mixing_params, double *calib_params, double *mass_integrals, int Nevt) {
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= Nevt) { return;}
int cat_index;
if (options[1]==0) {
if (options[2]==0) {cat_index = 0;}
else {cat_index = 1;}
}
else {
if (options[2]==0) {cat_index = 2;}
else {cat_index = 3;}
}
set_buffer_options(options,cat_index);
set_buffer_amplitudes(re_amps,dirCP_asyms,im_amps,weak_phases,mixing_params,calib_params);
int i0 = row*12;
decision_SSK[cat_index][row] = (int) data[0 + i0];
decision_OS[cat_index][row] = (int) data[1 + i0];
etamistag_SSK[cat_index][row] = data[2 + i0];
etamistag_OS[cat_index][row] = data[3 + i0];
m1[cat_index][row] = data[4 + i0];
m2[cat_index][row] = data[5 + i0];
cos1[cat_index][row] = data[6 + i0];
cos2[cat_index][row] = data[7 + i0];
phi[cat_index][row] = data[8 + i0];
t[cat_index][row] = data[9 + i0];
t_err[cat_index][row] = data[10 + i0];
double ev_weight = data[11 + i0];
double xm1 = (m1[cat_index][row]-1175.)/425.;
double modulus1 = 1.+c1_mass_swave*xm1+c2_mass_swave*(2.*xm1*xm1-1.)+c3_mass_swave*(4.*xm1*xm1*xm1-3.*xm1)+c4_mass_swave*(8.*xm1*xm1*xm1*xm1-8.*xm1*xm1+1.);
double xm2 = (m2[cat_index][row]-1175.)/425.;
double modulus2 = 1.+c1_mass_swave*xm2+c2_mass_swave*(2.*xm2*xm2-1.)+c3_mass_swave*(4.*xm2*xm2*xm2-3.*xm2)+c4_mass_swave*(8.*xm2*xm2*xm2*xm2-8.*xm2*xm2+1.);
if (modulus1<0 or modulus2<0) {out[row] = -10000000000;}
else {
set_buffer_differential_terms(mass_integrals,cat_index,row);
set_buffer_integral_terms(cat_index,row);
double num_fit_temp = num_fit(cat_index,row);
double den_fit_temp = den_fit(cat_index,row);
if (num_fit_temp/den_fit_temp<=0) {out[row] = -10000000000;}
else {out[row] = ev_weight*(log(num_fit_temp/den_fit_temp));}
}
/*set_buffer_differential_terms(mass_integrals,cat_index,row);
set_buffer_integral_terms(cat_index,row);
double num_fit_temp = num_fit(cat_index,row);
double den_fit_temp = den_fit(cat_index,row);
if (num_fit_temp/den_fit_temp<=0) {out[row] = -10000000000;}
else {out[row] = ev_weight*(log(num_fit_temp/den_fit_temp));}*/
}
// ##########################################
// Event generator
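// Ifi/Igi return the analytic integrals of the angular basis functions fi and gi used to
// normalise the generator PDF.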
__device__ double Ifi(int i) {
if (i == 1) {return If1;}
else if (i == 2) {return If2;}
else if (i == 3) {return If3;}
else if (i == 4) {return If4;}
else if (i == 5) {return If5;}
else if (i == 6) {return If6;}
else if (i == 7) {return If7;}
else if (i == 8) {return If8;}
else if (i == 9) {return If9;}
else if (i == 10) {return If10;}
else if (i == 11) {return If11;}
else if (i == 12) {return If12;}
else if (i == 13) {return If13;}
else if (i == 14) {return If14;}
else if (i == 15) {return If15;}
else if (i == 16) {return If16;}
else if (i == 17) {return If17;}
else if (i == 18) {return If18;}
return 0.;
}
__device__ double Igi(int i) {
if (i == 1) {return 2.*pi;}
else if (i == 2) {return 0.;}
else if (i == 3) {return 0.;}
else if (i == 4) {return pi;}
else if (i == 5) {return 0;}
else if (i == 6) {return pi;}
else if (i == 7) {return 0.;}
else if (i == 8) {return 0.;}
else if (i == 9) {return 0.;}
else if (i == 10) {return 0.;}
else if (i == 11) {return 0.;}
else if (i == 12) {return 0.;}
else if (i == 13) {return pi;}
else if (i == 14) {return 0.;}
else if (i == 15) {return pi;}
return 0.;
}
__device__ double Ifjjphhp(int j, int jp, int h, int hp) {
return Ifi(fjjphhpindexdict[j][jp][h][hp]);
}
__device__ double Ighhp(int h, int hp) {
return Igi(ghhpindexdict[h][hp]);
}
__device__ double Gaus(double x, double mean, double sigma, bool norm = 0) {
if (sigma == 0) return 1.e30;
double arg = (x-mean)/sigma;
double res = exp(-0.5*arg*arg);
if (!norm) return res;
return res/(2.50662827463100024*sigma);
}
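// Gamma-distributed random number via a Marsaglia-Tsang-type rejection (with the usual
// boost for a < 1). Note that the hiprandState_t is passed by value, so the draws advance
// only a local copy of the generator state; repeated calls with the same caller state
// therefore return the same value.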
__device__ double ran_gamma(hiprandState_t localState, double a, double b) {
if (a < 1){
double u = hiprand_uniform_double(&localState);
return ran_gamma (localState, 1.0 + a, b) * pow (u, 1.0 / a);
}
{
double x, v, u;
double d = a - 1.0 / 3.0;
double c = (1.0 / 3.0) / sqrt (d);
while (1){
do{
x = hiprand_normal_double(&localState);
v = 1.0 + c * x;
}
while (v <= 0);
v = v * v * v;
u = hiprand_uniform_double(&localState);
if (u < 1 - 0.0331 * x * x * x * x)
break;
if (log (u) < 0.5 * x * x + d * (1 - v + log (v)))
break;
}
return b * d * v;
}
}
__device__ double ran_P_deltat(hiprandState_t localState) {
double result;
while (1) {
result = ran_gamma(localState,gamma1_dt,beta1_dt);
if (result>=0. and result<=0.1) {break;}
}
return result;
}
__device__ double P_eta_SSK(double eta) {
if (eta < 0.5) {return c_SSK*Gaus(eta,mu1_SSK,sigma1_SSK)+(1.-c_SSK)*Gaus(eta,mu2_SSK,sigma2_SSK);}
else {return 0.;}
}
__device__ double P_eta_OS(double eta) {
if (eta < 0.5) {return c_OS*Gaus(eta,mu1_OS,sigma1_OS)+(1.-c_OS)*Gaus(eta,mu2_OS,sigma2_OS);}
else {return 0.;}
}
__global__ void get_max_P_eta_SSK(double *out) {
double funmax = 0;
double etavar;
double funvar;
for (int i=0; i<10000; i++) {
etavar = i/20000.;
funvar = P_eta_SSK(etavar);
if (funvar > funmax) {funmax = funvar;}
}
out[0] = 1.1*funmax;
return;
}
__global__ void get_max_P_eta_OS(double *out) {
double funmax = 0;
double etavar;
double funvar;
for (int i=0; i<10000; i++) {
etavar = i/20000.;
funvar = P_eta_OS(etavar);
if (funvar > funmax) {funmax = funvar;}
}
out[0] = 1.1*funmax;
return;
}
__global__ void set_generator(double *options, double *re_amps, double *dirCP_asyms, double *im_amps, double *weak_phases, double *mixing_params, double *calib_params, double *cond_distr_params, double *mass_integrals, double *ang_integrals) {
set_buffer_options(options,0);
year_opt[0] = 0;
trig_opt[0] = 0;
set_buffer_amplitudes(re_amps,dirCP_asyms,im_amps,weak_phases,mixing_params,calib_params);
tag_eff_SSK = cond_distr_params[0];
mu1_SSK = cond_distr_params[1];
sigma1_SSK = cond_distr_params[2];
c_SSK = cond_distr_params[3];
mu2_SSK = cond_distr_params[4];
sigma2_SSK = cond_distr_params[5];
tag_eff_OS = cond_distr_params[6];
mu1_OS = cond_distr_params[7];
sigma1_OS = cond_distr_params[8];
c_OS = cond_distr_params[9];
mu2_OS = cond_distr_params[10];
sigma2_OS = cond_distr_params[11];
gamma1_dt = cond_distr_params[12];
beta1_dt = cond_distr_params[13];
c_dt = cond_distr_params[14];
gamma2_dt = cond_distr_params[15];
beta2_dt = cond_distr_params[16];
Im00 = mass_integrals[0];
Im01 = mass_integrals[1];
Im10 = mass_integrals[2];
Im02 = mass_integrals[3];
Im20 = mass_integrals[4];
Im11 = mass_integrals[5];
Im12 = mass_integrals[6];
Im21 = mass_integrals[7];
Im22 = mass_integrals[8];
Ih1Re = mass_integrals[9];
Ih2Re = mass_integrals[10];
Ih3Re = mass_integrals[11];
Ih4Re = mass_integrals[12];
Ih5Re = mass_integrals[13];
Ih6Re = mass_integrals[14];
Ih7Re = mass_integrals[15];
Ih8Re = mass_integrals[16];
Ih9Re = mass_integrals[17];
Ih10Re = mass_integrals[18];
Ih11Re = mass_integrals[19];
Ih12Re = mass_integrals[20];
Ih13Re = mass_integrals[21];
Ih14Re = mass_integrals[22];
Ih15Re = mass_integrals[23];
Ih16Re = mass_integrals[24];
Ih17Re = mass_integrals[25];
Ih18Re = mass_integrals[26];
Ih19Re = mass_integrals[27];
Ih20Re = mass_integrals[28];
Ih21Re = mass_integrals[29];
Ih22Re = mass_integrals[30];
Ih23Re = mass_integrals[31];
Ih24Re = mass_integrals[32];
Ih25Re = mass_integrals[33];
Ih26Re = mass_integrals[34];
Ih27Re = mass_integrals[35];
Ih28Re = mass_integrals[36];
Ih29Re = mass_integrals[37];
Ih30Re = mass_integrals[38];
Ih1Im = mass_integrals[39];
Ih2Im = mass_integrals[40];
Ih3Im = mass_integrals[41];
Ih4Im = mass_integrals[42];
Ih5Im = mass_integrals[43];
Ih6Im = mass_integrals[44];
Ih7Im = mass_integrals[45];
Ih8Im = mass_integrals[46];
Ih9Im = mass_integrals[47];
Ih10Im = mass_integrals[48];
Ih11Im = mass_integrals[49];
Ih12Im = mass_integrals[50];
Ih13Im = mass_integrals[51];
Ih14Im = mass_integrals[52];
Ih15Im = mass_integrals[53];
Ih16Im = mass_integrals[54];
Ih17Im = mass_integrals[55];
Ih18Im = mass_integrals[56];
Ih19Im = mass_integrals[57];
Ih20Im = mass_integrals[58];
Ih21Im = mass_integrals[59];
If1 = ang_integrals[0];
If2 = ang_integrals[1];
If3 = ang_integrals[2];
If4 = ang_integrals[3];
If5 = ang_integrals[4];
If6 = ang_integrals[5];
If7 = ang_integrals[6];
If8 = ang_integrals[7];
If9 = ang_integrals[8];
If10 = ang_integrals[9];
If11 = ang_integrals[10];
If12 = ang_integrals[11];
If13 = ang_integrals[12];
If14 = ang_integrals[13];
If15 = ang_integrals[14];
If16 = ang_integrals[15];
If17 = ang_integrals[16];
If18 = ang_integrals[17];
reIhj1j2j1pj2pdict[0][0][0][0] = pycuda::real(pycuda::complex<double>(Ih22Re,0.));
reIhj1j2j1pj2pdict[0][0][0][1] = pycuda::real(pycuda::complex<double>(Ih1Re,Ih1Im));
reIhj1j2j1pj2pdict[0][0][0][2] = pycuda::real(pycuda::complex<double>(Ih2Re,Ih2Im));
reIhj1j2j1pj2pdict[0][0][1][0] = pycuda::real(pycuda::complex<double>(Ih1Re,Ih1Im));
reIhj1j2j1pj2pdict[0][0][1][1] = pycuda::real(pycuda::complex<double>(Ih3Re,Ih3Im));
reIhj1j2j1pj2pdict[0][0][1][2] = pycuda::real(pycuda::complex<double>(Ih4Re,Ih4Im));
reIhj1j2j1pj2pdict[0][0][2][0] = pycuda::real(pycuda::complex<double>(Ih2Re,Ih2Im));
reIhj1j2j1pj2pdict[0][0][2][1] = pycuda::real(pycuda::complex<double>(Ih4Re,Ih4Im));
reIhj1j2j1pj2pdict[0][0][2][2] = pycuda::real(pycuda::complex<double>(Ih5Re,Ih5Im));
reIhj1j2j1pj2pdict[0][1][0][0] = pycuda::real(pycuda::complex<double>(Ih1Re,-Ih1Im));
reIhj1j2j1pj2pdict[0][1][0][1] = pycuda::real(pycuda::complex<double>(Ih23Re,0.));
reIhj1j2j1pj2pdict[0][1][0][2] = pycuda::real(pycuda::complex<double>(Ih6Re,Ih6Im));
reIhj1j2j1pj2pdict[0][1][1][0] = pycuda::real(pycuda::complex<double>(Ih7Re,Ih7Im));
reIhj1j2j1pj2pdict[0][1][1][1] = pycuda::real(pycuda::complex<double>(Ih8Re,Ih8Im));
reIhj1j2j1pj2pdict[0][1][1][2] = pycuda::real(pycuda::complex<double>(Ih9Re,Ih9Im));
reIhj1j2j1pj2pdict[0][1][2][0] = pycuda::real(pycuda::complex<double>(Ih10Re,Ih10Im));
reIhj1j2j1pj2pdict[0][1][2][1] = pycuda::real(pycuda::complex<double>(Ih11Re,Ih11Im));
reIhj1j2j1pj2pdict[0][1][2][2] = pycuda::real(pycuda::complex<double>(Ih12Re,Ih12Im));
reIhj1j2j1pj2pdict[0][2][0][0] = pycuda::real(pycuda::complex<double>(Ih2Re,-Ih2Im));
reIhj1j2j1pj2pdict[0][2][0][1] = pycuda::real(pycuda::complex<double>(Ih6Re,-Ih6Im));
reIhj1j2j1pj2pdict[0][2][0][2] = pycuda::real(pycuda::complex<double>(Ih25Re,0.));
reIhj1j2j1pj2pdict[0][2][1][0] = pycuda::real(pycuda::complex<double>(Ih10Re,-Ih10Im));
reIhj1j2j1pj2pdict[0][2][1][1] = pycuda::real(pycuda::complex<double>(Ih13Re,Ih13Im));
reIhj1j2j1pj2pdict[0][2][1][2] = pycuda::real(pycuda::complex<double>(Ih14Re,Ih14Im));
reIhj1j2j1pj2pdict[0][2][2][0] = pycuda::real(pycuda::complex<double>(Ih15Re,Ih15Im));
reIhj1j2j1pj2pdict[0][2][2][1] = pycuda::real(pycuda::complex<double>(Ih16Re,Ih16Im));
reIhj1j2j1pj2pdict[0][2][2][2] = pycuda::real(pycuda::complex<double>(Ih17Re,Ih17Im));
reIhj1j2j1pj2pdict[1][0][0][0] = pycuda::real(pycuda::complex<double>(Ih1Re,-Ih1Im));
reIhj1j2j1pj2pdict[1][0][0][1] = pycuda::real(pycuda::complex<double>(Ih7Re,Ih7Im));
reIhj1j2j1pj2pdict[1][0][0][2] = pycuda::real(pycuda::complex<double>(Ih10Re,Ih10Im));
reIhj1j2j1pj2pdict[1][0][1][0] = pycuda::real(pycuda::complex<double>(Ih24Re,0.));
reIhj1j2j1pj2pdict[1][0][1][1] = pycuda::real(pycuda::complex<double>(Ih8Re,Ih8Im));
reIhj1j2j1pj2pdict[1][0][1][2] = pycuda::real(pycuda::complex<double>(Ih11Re,Ih11Im));
reIhj1j2j1pj2pdict[1][0][2][0] = pycuda::real(pycuda::complex<double>(Ih6Re,Ih6Im));
reIhj1j2j1pj2pdict[1][0][2][1] = pycuda::real(pycuda::complex<double>(Ih9Re,Ih9Im));
reIhj1j2j1pj2pdict[1][0][2][2] = pycuda::real(pycuda::complex<double>(Ih12Re,Ih12Im));
reIhj1j2j1pj2pdict[1][1][0][0] = pycuda::real(pycuda::complex<double>(Ih3Re,-Ih3Im));
reIhj1j2j1pj2pdict[1][1][0][1] = pycuda::real(pycuda::complex<double>(Ih8Re,-Ih8Im));
reIhj1j2j1pj2pdict[1][1][0][2] = pycuda::real(pycuda::complex<double>(Ih13Re,-Ih13Im));
reIhj1j2j1pj2pdict[1][1][1][0] = pycuda::real(pycuda::complex<double>(Ih8Re,-Ih8Im));
reIhj1j2j1pj2pdict[1][1][1][1] = pycuda::real(pycuda::complex<double>(Ih27Re,0.));
reIhj1j2j1pj2pdict[1][1][1][2] = pycuda::real(pycuda::complex<double>(Ih18Re,Ih18Im));
reIhj1j2j1pj2pdict[1][1][2][0] = pycuda::real(pycuda::complex<double>(Ih13Re,-Ih13Im));
reIhj1j2j1pj2pdict[1][1][2][1] = pycuda::real(pycuda::complex<double>(Ih18Re,Ih18Im));
reIhj1j2j1pj2pdict[1][1][2][2] = pycuda::real(pycuda::complex<double>(Ih19Re,Ih19Im));
reIhj1j2j1pj2pdict[1][2][0][0] = pycuda::real(pycuda::complex<double>(Ih4Re,-Ih4Im));
reIhj1j2j1pj2pdict[1][2][0][1] = pycuda::real(pycuda::complex<double>(Ih9Re,-Ih9Im));
reIhj1j2j1pj2pdict[1][2][0][2] = pycuda::real(pycuda::complex<double>(Ih14Re,-Ih14Im));
reIhj1j2j1pj2pdict[1][2][1][0] = pycuda::real(pycuda::complex<double>(Ih11Re,-Ih11Im));
reIhj1j2j1pj2pdict[1][2][1][1] = pycuda::real(pycuda::complex<double>(Ih18Re,-Ih18Im));
reIhj1j2j1pj2pdict[1][2][1][2] = pycuda::real(pycuda::complex<double>(Ih28Re,0.));
reIhj1j2j1pj2pdict[1][2][2][0] = pycuda::real(pycuda::complex<double>(Ih16Re,-Ih16Im));
reIhj1j2j1pj2pdict[1][2][2][1] = pycuda::real(pycuda::complex<double>(Ih20Re,Ih20Im));
reIhj1j2j1pj2pdict[1][2][2][2] = pycuda::real(pycuda::complex<double>(Ih21Re,Ih21Im));
reIhj1j2j1pj2pdict[2][0][0][0] = pycuda::real(pycuda::complex<double>(Ih2Re,-Ih2Im));
reIhj1j2j1pj2pdict[2][0][0][1] = pycuda::real(pycuda::complex<double>(Ih10Re,-Ih10Im));
reIhj1j2j1pj2pdict[2][0][0][2] = pycuda::real(pycuda::complex<double>(Ih15Re,Ih15Im));
reIhj1j2j1pj2pdict[2][0][1][0] = pycuda::real(pycuda::complex<double>(Ih6Re,-Ih6Im));
reIhj1j2j1pj2pdict[2][0][1][1] = pycuda::real(pycuda::complex<double>(Ih13Re,Ih13Im));
reIhj1j2j1pj2pdict[2][0][1][2] = pycuda::real(pycuda::complex<double>(Ih16Re,Ih16Im));
reIhj1j2j1pj2pdict[2][0][2][0] = pycuda::real(pycuda::complex<double>(Ih26Re,0.));
reIhj1j2j1pj2pdict[2][0][2][1] = pycuda::real(pycuda::complex<double>(Ih14Re,Ih14Im));
reIhj1j2j1pj2pdict[2][0][2][2] = pycuda::real(pycuda::complex<double>(Ih17Re,Ih17Im));
reIhj1j2j1pj2pdict[2][1][0][0] = pycuda::real(pycuda::complex<double>(Ih4Re,-Ih4Im));
reIhj1j2j1pj2pdict[2][1][0][1] = pycuda::real(pycuda::complex<double>(Ih11Re,-Ih11Im));
reIhj1j2j1pj2pdict[2][1][0][2] = pycuda::real(pycuda::complex<double>(Ih16Re,-Ih16Im));
reIhj1j2j1pj2pdict[2][1][1][0] = pycuda::real(pycuda::complex<double>(Ih9Re,-Ih9Im));
reIhj1j2j1pj2pdict[2][1][1][1] = pycuda::real(pycuda::complex<double>(Ih18Re,-Ih18Im));
reIhj1j2j1pj2pdict[2][1][1][2] = pycuda::real(pycuda::complex<double>(Ih20Re,Ih20Im));
reIhj1j2j1pj2pdict[2][1][2][0] = pycuda::real(pycuda::complex<double>(Ih14Re,-Ih14Im));
reIhj1j2j1pj2pdict[2][1][2][1] = pycuda::real(pycuda::complex<double>(Ih29Re,0.));
reIhj1j2j1pj2pdict[2][1][2][2] = pycuda::real(pycuda::complex<double>(Ih21Re,Ih21Im));
reIhj1j2j1pj2pdict[2][2][0][0] = pycuda::real(pycuda::complex<double>(Ih5Re,-Ih5Im));
reIhj1j2j1pj2pdict[2][2][0][1] = pycuda::real(pycuda::complex<double>(Ih12Re,-Ih12Im));
reIhj1j2j1pj2pdict[2][2][0][2] = pycuda::real(pycuda::complex<double>(Ih17Re,-Ih17Im));
reIhj1j2j1pj2pdict[2][2][1][0] = pycuda::real(pycuda::complex<double>(Ih12Re,-Ih12Im));
reIhj1j2j1pj2pdict[2][2][1][1] = pycuda::real(pycuda::complex<double>(Ih19Re,-Ih19Im));
reIhj1j2j1pj2pdict[2][2][1][2] = pycuda::real(pycuda::complex<double>(Ih21Re,-Ih21Im));
reIhj1j2j1pj2pdict[2][2][2][0] = pycuda::real(pycuda::complex<double>(Ih17Re,-Ih17Im));
reIhj1j2j1pj2pdict[2][2][2][1] = pycuda::real(pycuda::complex<double>(Ih21Re,-Ih21Im));
reIhj1j2j1pj2pdict[2][2][2][2] = pycuda::real(pycuda::complex<double>(Ih30Re,0.));
imIhj1j2j1pj2pdict[0][0][0][0] = pycuda::imag(pycuda::complex<double>(Ih22Re,0.));
imIhj1j2j1pj2pdict[0][0][0][1] = pycuda::imag(pycuda::complex<double>(Ih1Re,Ih1Im));
imIhj1j2j1pj2pdict[0][0][0][2] = pycuda::imag(pycuda::complex<double>(Ih2Re,Ih2Im));
imIhj1j2j1pj2pdict[0][0][1][0] = pycuda::imag(pycuda::complex<double>(Ih1Re,Ih1Im));
imIhj1j2j1pj2pdict[0][0][1][1] = pycuda::imag(pycuda::complex<double>(Ih3Re,Ih3Im));
imIhj1j2j1pj2pdict[0][0][1][2] = pycuda::imag(pycuda::complex<double>(Ih4Re,Ih4Im));
imIhj1j2j1pj2pdict[0][0][2][0] = pycuda::imag(pycuda::complex<double>(Ih2Re,Ih2Im));
imIhj1j2j1pj2pdict[0][0][2][1] = pycuda::imag(pycuda::complex<double>(Ih4Re,Ih4Im));
imIhj1j2j1pj2pdict[0][0][2][2] = pycuda::imag(pycuda::complex<double>(Ih5Re,Ih5Im));
imIhj1j2j1pj2pdict[0][1][0][0] = pycuda::imag(pycuda::complex<double>(Ih1Re,-Ih1Im));
imIhj1j2j1pj2pdict[0][1][0][1] = pycuda::imag(pycuda::complex<double>(Ih23Re,0.));
imIhj1j2j1pj2pdict[0][1][0][2] = pycuda::imag(pycuda::complex<double>(Ih6Re,Ih6Im));
imIhj1j2j1pj2pdict[0][1][1][0] = pycuda::imag(pycuda::complex<double>(Ih7Re,Ih7Im));
imIhj1j2j1pj2pdict[0][1][1][1] = pycuda::imag(pycuda::complex<double>(Ih8Re,Ih8Im));
imIhj1j2j1pj2pdict[0][1][1][2] = pycuda::imag(pycuda::complex<double>(Ih9Re,Ih9Im));
imIhj1j2j1pj2pdict[0][1][2][0] = pycuda::imag(pycuda::complex<double>(Ih10Re,Ih10Im));
imIhj1j2j1pj2pdict[0][1][2][1] = pycuda::imag(pycuda::complex<double>(Ih11Re,Ih11Im));
imIhj1j2j1pj2pdict[0][1][2][2] = pycuda::imag(pycuda::complex<double>(Ih12Re,Ih12Im));
imIhj1j2j1pj2pdict[0][2][0][0] = pycuda::imag(pycuda::complex<double>(Ih2Re,-Ih2Im));
imIhj1j2j1pj2pdict[0][2][0][1] = pycuda::imag(pycuda::complex<double>(Ih6Re,-Ih6Im));
imIhj1j2j1pj2pdict[0][2][0][2] = pycuda::imag(pycuda::complex<double>(Ih25Re,0.));
imIhj1j2j1pj2pdict[0][2][1][0] = pycuda::imag(pycuda::complex<double>(Ih10Re,-Ih10Im));
imIhj1j2j1pj2pdict[0][2][1][1] = pycuda::imag(pycuda::complex<double>(Ih13Re,Ih13Im));
imIhj1j2j1pj2pdict[0][2][1][2] = pycuda::imag(pycuda::complex<double>(Ih14Re,Ih14Im));
imIhj1j2j1pj2pdict[0][2][2][0] = pycuda::imag(pycuda::complex<double>(Ih15Re,Ih15Im));
imIhj1j2j1pj2pdict[0][2][2][1] = pycuda::imag(pycuda::complex<double>(Ih16Re,Ih16Im));
imIhj1j2j1pj2pdict[0][2][2][2] = pycuda::imag(pycuda::complex<double>(Ih17Re,Ih17Im));
imIhj1j2j1pj2pdict[1][0][0][0] = pycuda::imag(pycuda::complex<double>(Ih1Re,-Ih1Im));
imIhj1j2j1pj2pdict[1][0][0][1] = pycuda::imag(pycuda::complex<double>(Ih7Re,Ih7Im));
imIhj1j2j1pj2pdict[1][0][0][2] = pycuda::imag(pycuda::complex<double>(Ih10Re,Ih10Im));
imIhj1j2j1pj2pdict[1][0][1][0] = pycuda::imag(pycuda::complex<double>(Ih24Re,0.));
imIhj1j2j1pj2pdict[1][0][1][1] = pycuda::imag(pycuda::complex<double>(Ih8Re,Ih8Im));
imIhj1j2j1pj2pdict[1][0][1][2] = pycuda::imag(pycuda::complex<double>(Ih11Re,Ih11Im));
imIhj1j2j1pj2pdict[1][0][2][0] = pycuda::imag(pycuda::complex<double>(Ih6Re,Ih6Im));
imIhj1j2j1pj2pdict[1][0][2][1] = pycuda::imag(pycuda::complex<double>(Ih9Re,Ih9Im));
imIhj1j2j1pj2pdict[1][0][2][2] = pycuda::imag(pycuda::complex<double>(Ih12Re,Ih12Im));
imIhj1j2j1pj2pdict[1][1][0][0] = pycuda::imag(pycuda::complex<double>(Ih3Re,-Ih3Im));
imIhj1j2j1pj2pdict[1][1][0][1] = pycuda::imag(pycuda::complex<double>(Ih8Re,-Ih8Im));
imIhj1j2j1pj2pdict[1][1][0][2] = pycuda::imag(pycuda::complex<double>(Ih13Re,-Ih13Im));
imIhj1j2j1pj2pdict[1][1][1][0] = pycuda::imag(pycuda::complex<double>(Ih8Re,-Ih8Im));
imIhj1j2j1pj2pdict[1][1][1][1] = pycuda::imag(pycuda::complex<double>(Ih27Re,0.));
imIhj1j2j1pj2pdict[1][1][1][2] = pycuda::imag(pycuda::complex<double>(Ih18Re,Ih18Im));
imIhj1j2j1pj2pdict[1][1][2][0] = pycuda::imag(pycuda::complex<double>(Ih13Re,-Ih13Im));
imIhj1j2j1pj2pdict[1][1][2][1] = pycuda::imag(pycuda::complex<double>(Ih18Re,Ih18Im));
imIhj1j2j1pj2pdict[1][1][2][2] = pycuda::imag(pycuda::complex<double>(Ih19Re,Ih19Im));
imIhj1j2j1pj2pdict[1][2][0][0] = pycuda::imag(pycuda::complex<double>(Ih4Re,-Ih4Im));
imIhj1j2j1pj2pdict[1][2][0][1] = pycuda::imag(pycuda::complex<double>(Ih9Re,-Ih9Im));
imIhj1j2j1pj2pdict[1][2][0][2] = pycuda::imag(pycuda::complex<double>(Ih14Re,-Ih14Im));
imIhj1j2j1pj2pdict[1][2][1][0] = pycuda::imag(pycuda::complex<double>(Ih11Re,-Ih11Im));
imIhj1j2j1pj2pdict[1][2][1][1] = pycuda::imag(pycuda::complex<double>(Ih18Re,-Ih18Im));
imIhj1j2j1pj2pdict[1][2][1][2] = pycuda::imag(pycuda::complex<double>(Ih28Re,0.));
imIhj1j2j1pj2pdict[1][2][2][0] = pycuda::imag(pycuda::complex<double>(Ih16Re,-Ih16Im));
imIhj1j2j1pj2pdict[1][2][2][1] = pycuda::imag(pycuda::complex<double>(Ih20Re,Ih20Im));
imIhj1j2j1pj2pdict[1][2][2][2] = pycuda::imag(pycuda::complex<double>(Ih21Re,Ih21Im));
imIhj1j2j1pj2pdict[2][0][0][0] = pycuda::imag(pycuda::complex<double>(Ih2Re,-Ih2Im));
imIhj1j2j1pj2pdict[2][0][0][1] = pycuda::imag(pycuda::complex<double>(Ih10Re,-Ih10Im));
imIhj1j2j1pj2pdict[2][0][0][2] = pycuda::imag(pycuda::complex<double>(Ih15Re,Ih15Im));
imIhj1j2j1pj2pdict[2][0][1][0] = pycuda::imag(pycuda::complex<double>(Ih6Re,-Ih6Im));
imIhj1j2j1pj2pdict[2][0][1][1] = pycuda::imag(pycuda::complex<double>(Ih13Re,Ih13Im));
imIhj1j2j1pj2pdict[2][0][1][2] = pycuda::imag(pycuda::complex<double>(Ih16Re,Ih16Im));
imIhj1j2j1pj2pdict[2][0][2][0] = pycuda::imag(pycuda::complex<double>(Ih26Re,0.));
imIhj1j2j1pj2pdict[2][0][2][1] = pycuda::imag(pycuda::complex<double>(Ih14Re,Ih14Im));
imIhj1j2j1pj2pdict[2][0][2][2] = pycuda::imag(pycuda::complex<double>(Ih17Re,Ih17Im));
imIhj1j2j1pj2pdict[2][1][0][0] = pycuda::imag(pycuda::complex<double>(Ih4Re,-Ih4Im));
imIhj1j2j1pj2pdict[2][1][0][1] = pycuda::imag(pycuda::complex<double>(Ih11Re,-Ih11Im));
imIhj1j2j1pj2pdict[2][1][0][2] = pycuda::imag(pycuda::complex<double>(Ih16Re,-Ih16Im));
imIhj1j2j1pj2pdict[2][1][1][0] = pycuda::imag(pycuda::complex<double>(Ih9Re,-Ih9Im));
imIhj1j2j1pj2pdict[2][1][1][1] = pycuda::imag(pycuda::complex<double>(Ih18Re,-Ih18Im));
imIhj1j2j1pj2pdict[2][1][1][2] = pycuda::imag(pycuda::complex<double>(Ih20Re,Ih20Im));
imIhj1j2j1pj2pdict[2][1][2][0] = pycuda::imag(pycuda::complex<double>(Ih14Re,-Ih14Im));
imIhj1j2j1pj2pdict[2][1][2][1] = pycuda::imag(pycuda::complex<double>(Ih29Re,0.));
imIhj1j2j1pj2pdict[2][1][2][2] = pycuda::imag(pycuda::complex<double>(Ih21Re,Ih21Im));
imIhj1j2j1pj2pdict[2][2][0][0] = pycuda::imag(pycuda::complex<double>(Ih5Re,-Ih5Im));
imIhj1j2j1pj2pdict[2][2][0][1] = pycuda::imag(pycuda::complex<double>(Ih12Re,-Ih12Im));
imIhj1j2j1pj2pdict[2][2][0][2] = pycuda::imag(pycuda::complex<double>(Ih17Re,-Ih17Im));
imIhj1j2j1pj2pdict[2][2][1][0] = pycuda::imag(pycuda::complex<double>(Ih12Re,-Ih12Im));
imIhj1j2j1pj2pdict[2][2][1][1] = pycuda::imag(pycuda::complex<double>(Ih19Re,-Ih19Im));
imIhj1j2j1pj2pdict[2][2][1][2] = pycuda::imag(pycuda::complex<double>(Ih21Re,-Ih21Im));
imIhj1j2j1pj2pdict[2][2][2][0] = pycuda::imag(pycuda::complex<double>(Ih17Re,-Ih17Im));
imIhj1j2j1pj2pdict[2][2][2][1] = pycuda::imag(pycuda::complex<double>(Ih21Re,-Ih21Im));
imIhj1j2j1pj2pdict[2][2][2][2] = pycuda::imag(pycuda::complex<double>(Ih30Re,0.));
}
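// Per-event buffers for the generator PDF. For event iev this computes the
// resolution-convolved time functions (cosh, sinh, cos and sin terms) using the
// per-event decay-time-error calibration, the tagging factors zeta and
// DCP_tzero, the angular basis functions fi(cos1), fi(cos2) and gi(phi), the
// mass amplitudes Mj1j2(m1,m2) and the two-body phase-space factor.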
__device__ void set_buffer_differential_terms_gen(int iev) {
double f1,f2,s1,s2,x1,x2;
if (acctype == 3) {
f1 = 1.;
f2 = 0.;
s1 = p0_tres_12+p1_tres_12*(t_err[0][iev]-deltatmean_tres_12);
//s1 = (p0_tres_12+p1_tres_12*(t_err[0][iev]-deltatmean_tres_12))*1.1779041429731925;
//s1 = (p0_tres_12+p1_tres_12*(t_err[0][iev]-deltatmean_tres_12))*t_err[0][iev];
s2 = 1.;
x1 = t[0][iev]/(sqrt(2.)*s1);
x2 = t[0][iev]/(sqrt(2.)*s2);
}
else {
f1 = 1.;
f2 = 0.;
if (year_opt[0] == 0) {s1 = p0_tres_11+p1_tres_11*(t_err[0][iev]-deltatmean_tres_11);}
else {s1 = p0_tres_12+p1_tres_12*(t_err[0][iev]-deltatmean_tres_12);}
//if (year_opt == 0) {s1 = (p0_tres_11+p1_tres_11*(t_err[0][iev]-deltatmean_tres_11))*1.1779041429731925;}
//else {s1 = (p0_tres_12+p1_tres_12*(t_err[0][iev]-deltatmean_tres_12))*1.1779041429731925;}
//if (year_opt == 0) {s1 = (p0_tres_11+p1_tres_11*(t_err[0][iev]-deltatmean_tres_11))*t_err[0][iev];}
//else {s1 = (p0_tres_12+p1_tres_12*(t_err[0][iev]-deltatmean_tres_12))*t_err[0][iev];}
s2 = 1.;
x1 = t[0][iev]/(sqrt(2.)*s1);
x2 = t[0][iev]/(sqrt(2.)*s2);
}
pycuda::complex<double> z1_hyper_plus = s1/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq-0.5*delta_gamma_freq,0.);
pycuda::complex<double> z2_hyper_plus = s2/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq-0.5*delta_gamma_freq,0.);
pycuda::complex<double> z1_hyper_minus = s1/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq+0.5*delta_gamma_freq,0.);
pycuda::complex<double> z2_hyper_minus = s2/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq+0.5*delta_gamma_freq,0.);
pycuda::complex<double> z1_trigo = s1/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq,-delta_m_freq);
pycuda::complex<double> z2_trigo = s2/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq,-delta_m_freq);
double conv_exp_hyper_plus = pycuda::real(f1*conv_exp(x1,z1_hyper_plus)+f2*conv_exp(x2,z2_hyper_plus));
double conv_exp_hyper_minus = pycuda::real(f1*conv_exp(x1,z1_hyper_minus)+f2*conv_exp(x2,z2_hyper_minus));
pycuda::complex<double> conv_exp_trigo = f1*conv_exp(x1,z1_trigo)+f2*conv_exp(x2,z2_trigo);
T_cosh_temp[0][iev] = 0.5*(conv_exp_hyper_plus + conv_exp_hyper_minus);
T_sinh_temp[0][iev] = 0.5*(conv_exp_hyper_plus - conv_exp_hyper_minus);
T_cos_temp[0][iev] = pycuda::real(conv_exp_trigo);
T_sin_temp[0][iev] = pycuda::imag(conv_exp_trigo);
zeta_temp[0][iev] = zeta(decision_SSK[0][iev],decision_OS[0][iev],etamistag_SSK[0][iev],etamistag_OS[0][iev]);
DCP_tzero_temp[0][iev] = DCP_tzero(decision_SSK[0][iev],decision_OS[0][iev],etamistag_SSK[0][iev],etamistag_OS[0][iev]);
for (int i=0; i<18; ++i) {fi_cos1_temp[i][0][iev] = fi(cos1[0][iev],i+1);}
for (int i=0; i<18; ++i) {fi_cos2_temp[i][0][iev] = fi(cos2[0][iev],i+1);}
for (int i=0; i<15; ++i) {gi_temp[i][0][iev] = gi(phi[0][iev],i+1);}
for (int j1=0; j1<3; ++j1) {
for (int j2=0; j2<3; ++j2) {
pycuda::complex<double> M_temp = Mj1j2(m1[0][iev],m2[0][iev],j1,j2);
reMj1j2_temp[j1][j2][0][iev] = pycuda::real(M_temp);
imMj1j2_temp[j1][j2][0][iev] = pycuda::imag(M_temp);
}
}
phasespace_temp[0][iev] = phasespace(m1[0][iev],m2[0][iev]);
}
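// Normalisation of the toy-generation PDF. comp_den_toy returns the contribution
// of one (j1,j2,h) x (j1p,j2p,hp) combination: the real part of the time integral
// evaluated for the event's decay-time error, times the amplitude coefficient and
// the mass integral, multiplied by the corresponding angular integrals. den_toy
// sums the diagonal terms once and the interference terms with a factor 2.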
__device__ double comp_den_toy(int j1, int j2, int h, int j1p, int j2p, int hp, int iev) {
return pycuda::real(ITj1j2hj1pj2php_deltat(j1,j2,h,j1p,j2p,hp,0,iev)*Nj1j2hj1pj2php(j1,j2,h,j1p,j2p,hp)*Ihj1j2j1pj2p(j1,j2,j1p,j2p))*Ighhp(h,hp)*Ifjjphhp(j1,j1p,h,hp)*Ifjjphhp(j2,j2p,h,hp);
}
__device__ double den_toy(int iev) {
return comp_den_toy(0,0,0,0,0,0,iev)+comp_den_toy(0,1,0,0,1,0,iev)+comp_den_toy(0,2,0,0,2,0,iev)+comp_den_toy(1,0,0,1,0,0,iev)+comp_den_toy(1,1,0,1,1,0,iev)
+comp_den_toy(1,1,1,1,1,1,iev)+comp_den_toy(1,1,2,1,1,2,iev)+comp_den_toy(1,2,0,1,2,0,iev)+comp_den_toy(1,2,1,1,2,1,iev)+comp_den_toy(1,2,2,1,2,2,iev)
+comp_den_toy(2,0,0,2,0,0,iev)+comp_den_toy(2,1,0,2,1,0,iev)+comp_den_toy(2,1,1,2,1,1,iev)+comp_den_toy(2,1,2,2,1,2,iev)+comp_den_toy(2,2,0,2,2,0,iev)
+comp_den_toy(2,2,1,2,2,1,iev)+comp_den_toy(2,2,2,2,2,2,iev)+comp_den_toy(2,2,3,2,2,3,iev)+comp_den_toy(2,2,4,2,2,4,iev)+2.*comp_den_toy(0,1,0,0,0,0,iev)
+2.*comp_den_toy(0,1,0,1,0,0,iev)+2.*comp_den_toy(0,1,0,2,0,0,iev)+2.*comp_den_toy(0,2,0,0,0,0,iev)+2.*comp_den_toy(0,2,0,0,1,0,iev)+2.*comp_den_toy(0,2,0,1,0,0,iev)
+2.*comp_den_toy(0,2,0,1,1,0,iev)+2.*comp_den_toy(0,2,0,2,0,0,iev)+2.*comp_den_toy(0,2,0,2,1,0,iev)+2.*comp_den_toy(1,0,0,0,0,0,iev)+2.*comp_den_toy(1,1,0,0,0,0,iev)
+2.*comp_den_toy(1,1,0,0,1,0,iev)+2.*comp_den_toy(1,1,0,1,0,0,iev)+2.*comp_den_toy(1,1,0,2,0,0,iev)+2.*comp_den_toy(1,2,0,0,0,0,iev)+2.*comp_den_toy(1,2,0,0,1,0,iev)
+2.*comp_den_toy(1,2,0,0,2,0,iev)+2.*comp_den_toy(1,2,0,1,0,0,iev)+2.*comp_den_toy(1,2,0,1,1,0,iev)+2.*comp_den_toy(1,2,0,2,0,0,iev)+2.*comp_den_toy(1,2,0,2,1,0,iev)
+2.*comp_den_toy(1,2,1,1,1,1,iev)+2.*comp_den_toy(1,2,1,2,1,1,iev)+2.*comp_den_toy(1,2,2,1,1,2,iev)+2.*comp_den_toy(1,2,2,2,1,2,iev)+2.*comp_den_toy(2,0,0,0,0,0,iev)
+2.*comp_den_toy(2,0,0,1,0,0,iev)+2.*comp_den_toy(2,1,0,0,0,0,iev)+2.*comp_den_toy(2,1,0,0,1,0,iev)+2.*comp_den_toy(2,1,0,1,0,0,iev)+2.*comp_den_toy(2,1,0,1,1,0,iev)
+2.*comp_den_toy(2,1,0,2,0,0,iev)+2.*comp_den_toy(2,1,1,1,1,1,iev)+2.*comp_den_toy(2,1,2,1,1,2,iev)+2.*comp_den_toy(2,2,0,0,0,0,iev)+2.*comp_den_toy(2,2,0,0,1,0,iev)
+2.*comp_den_toy(2,2,0,0,2,0,iev)+2.*comp_den_toy(2,2,0,1,0,0,iev)+2.*comp_den_toy(2,2,0,1,1,0,iev)+2.*comp_den_toy(2,2,0,1,2,0,iev)+2.*comp_den_toy(2,2,0,2,0,0,iev)
+2.*comp_den_toy(2,2,0,2,1,0,iev)+2.*comp_den_toy(2,2,1,1,1,1,iev)+2.*comp_den_toy(2,2,1,1,2,1,iev)+2.*comp_den_toy(2,2,1,2,1,1,iev)+2.*comp_den_toy(2,2,2,1,1,2,iev)
+2.*comp_den_toy(2,2,2,1,2,2,iev)+2.*comp_den_toy(2,2,2,2,1,2,iev);
}
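// Evaluates the conditional PDF for a single candidate (observables and tagging
// decisions passed by value). The candidate is loaded into slot 0 of the event
// buffers, a subset of the mixing and calibration parameters is refreshed, and
// out[0] receives log(num/den) with the generator-level acceptance accGen applied
// to the numerator, or -1e20 if the ratio is not positive.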
__global__ void evaluate_CondPDF(double m1_ran, double m2_ran, double cos1_ran, double cos2_ran, double phi_ran, double t_ran, double t_err_ran, int q_SSK_ran, int q_OS_ran, double eta_SSK_ran, double eta_OS_ran, double *mixing_params, double *calib_params, double *out) {
m1[0][0] = m1_ran;
m2[0][0] = m2_ran;
cos1[0][0] = cos1_ran;
cos2[0][0] = cos2_ran;
phi[0][0] = phi_ran;
t[0][0] = t_ran;
t_err[0][0] = t_err_ran;
decision_SSK[0][0] = q_SSK_ran;
decision_OS[0][0] = q_OS_ran;
etamistag_SSK[0][0] = eta_SSK_ran;
etamistag_OS[0][0] = eta_OS_ran;
gamma_Bs_freq = mixing_params[1];
delta_gamma_freq = mixing_params[2];
p0metac_tag_SSK = calib_params[0];
p0metac_tag_OS = calib_params[1];
Dp0half_tag_SSK = calib_params[2];
Dp0half_tag_OS = calib_params[3];
p1_tag_SSK = calib_params[4];
p1_tag_OS = calib_params[5];
Dp1half_tag_SSK = calib_params[6];
Dp1half_tag_OS = calib_params[7];
p0_tres_12 = calib_params[14];
p1_tres_12 = calib_params[15];
c5_mass_swave = calib_params[26];
c6_mass_swave = calib_params[27];
c7_mass_swave = calib_params[28];
c8_mass_swave = calib_params[29];
set_buffer_differential_terms_gen(0);
set_buffer_integral_terms(0,0);
double num_temp = num_fit(0,0)*accGen(t_ran,m1_ran,m2_ran,cos1_ran,cos2_ran,phi_ran);
double den_temp = den_toy(0);
if (num_temp/den_temp<=0) {out[0] = -1.e20;}
else {out[0] = log(num_temp/den_temp);}
}
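// Toy-event generator: one thread per event. Each thread seeds its own cuRAND
// state, draws a per-event decay-time error, then decides for each tagger whether
// the event is tagged (probability tag_eff_*); tagged events get a mistag value
// sampled by accept-reject under P_eta_* with the precomputed envelope, untagged
// events get eta = 0.5. The decay observables (m1, m2, cos1, cos2, phi, t) and the
// tag decisions are then generated by accept-reject against
// num_fit * accGen / (den_toy * max_fun_cond), and the accepted event is written
// as 12 consecutive doubles into gendata.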
__global__ void generateEvent(double *gendata, double max_fun_eta_SSK, double max_fun_eta_OS, double max_fun_cond, int Nevt) {
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= Nevt) { return;}
hiprandState_t state;
hiprand_init((unsigned long long)clock(), row, 0, &state);
// Determination of the per event decay time error.
t_err[0][row] = ran_P_deltat(state);
// Determination of the SSK mistag probability.
double dec_SSK_tagged = hiprand_uniform(&state);
if (dec_SSK_tagged < tag_eff_SSK) {
double etamistag_SSK_ran;
max_fun[row] = max_fun_eta_SSK;
while (1) {
etamistag_SSK_ran = 0.5*hiprand_uniform(&state);
fun_ran[row] = P_eta_SSK(etamistag_SSK_ran);
dec_accepted[row] = max_fun[row]*hiprand_uniform(&state);
if (fun_ran[row] > dec_accepted[row]) {break;}
}
etamistag_SSK[0][row] = etamistag_SSK_ran;
}
else {
etamistag_SSK[0][row] = 0.5;
}
// Determination of the OS mistag probability.
double dec_OS_tagged = hiprand_uniform(&state);
if (dec_OS_tagged < tag_eff_OS) {
double etamistag_OS_ran;
max_fun[row] = max_fun_eta_OS;
while (1) {
etamistag_OS_ran = 0.5*hiprand_uniform(&state);
fun_ran[row] = P_eta_OS(etamistag_OS_ran);
dec_accepted[row] = max_fun[row]*hiprand_uniform(&state);
if (fun_ran[row] > dec_accepted[row]) {break;}
}
etamistag_OS[0][row] = etamistag_OS_ran;
}
else {
etamistag_OS[0][row] = 0.5;
}
// Determination of the decay observables.
max_fun[row] = max_fun_cond;
while (1) {
if (wide_window == 1) {
m1[0][row] = 750.+hiprand_uniform(&state)*(1600.-750.);
m2[0][row] = 750.+hiprand_uniform(&state)*(1600.-750.);
}
else {
m1[0][row] = 750.+hiprand_uniform(&state)*(1050.-750.);
m2[0][row] = 750.+hiprand_uniform(&state)*(1050.-750.);
}
cos1[0][row] = -1.+hiprand_uniform(&state)*2.;
cos2[0][row] = -1.+hiprand_uniform(&state)*2.;
phi[0][row] = hiprand_uniform(&state)*2.*pi;
t[0][row] = hiprand_uniform(&state)*12.;
if (etamistag_SSK[0][row] == 0.5) {decision_SSK[0][row] = 0;}
else {
double d_SSK = hiprand_uniform(&state);
if (d_SSK <= 0.5) {decision_SSK[0][row] = -1;}
else {decision_SSK[0][row] = 1;}
}
if (etamistag_OS[0][row] == 0.5) {decision_OS[0][row] = 0;}
else {
double d_OS = hiprand_uniform(&state);
if (d_OS <= 0.5) {decision_OS[0][row] = -1;}
else {decision_OS[0][row] = 1;}
}
set_buffer_differential_terms_gen(row);
set_buffer_integral_terms(0,row);
dec_accepted[row] = hiprand_uniform(&state);
fun_ran[row] = num_fit(0,row)/den_toy(row)*accGen(t[0][row],m1[0][row],m2[0][row],cos1[0][row],cos2[0][row],phi[0][row])/max_fun[row];
if (fun_ran[row] > dec_accepted[row]) {break;}
}
int i0 = row*12;
gendata[0 + i0] = (double) decision_SSK[0][row];
gendata[1 + i0] = (double) decision_OS[0][row];
gendata[2 + i0] = etamistag_SSK[0][row];
gendata[3 + i0] = etamistag_OS[0][row];
gendata[4 + i0] = m1[0][row];
gendata[5 + i0] = m2[0][row];
gendata[6 + i0] = cos1[0][row];
gendata[7 + i0] = cos2[0][row];
gendata[8 + i0] = phi[0][row];
gendata[9 + i0] = t[0][row];
gendata[10 + i0] = t_err[0][row];
gendata[11 + i0] = 1.;
return;
}
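// Per-event evaluation of the toy PDF: unpacks one 12-column record, rebuilds the
// per-event buffers and writes log(num_fit/den_toy) to out[row] (a large negative
// sentinel if the ratio is not positive). Note that every thread also calls
// set_buffer_amplitudes on the shared device globals with the same arguments.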
__global__ void evaluate_toy(double *data, double *out, double *re_amps, double *dirCP_asyms, double *im_amps, double *weak_phases, double *mixing_params, double *calib_params, int Nevt) {
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= Nevt) { return;}
set_buffer_amplitudes(re_amps,dirCP_asyms,im_amps,weak_phases,mixing_params,calib_params);
int i0 = row*12;
decision_SSK[0][row] = (int) data[0 + i0];
decision_OS[0][row] = (int) data[1 + i0];
etamistag_SSK[0][row] = data[2 + i0];
etamistag_OS[0][row] = data[3 + i0];
m1[0][row] = data[4 + i0];
m2[0][row] = data[5 + i0];
cos1[0][row] = data[6 + i0];
cos2[0][row] = data[7 + i0];
phi[0][row] = data[8 + i0];
t[0][row] = data[9 + i0];
t_err[0][row] = data[10 + i0];
set_buffer_differential_terms_gen(row);
set_buffer_integral_terms(0,row);
double num_fit_temp = num_fit(0,row);
double den_fit_temp = den_toy(row);
if (num_fit_temp/den_fit_temp<=0) {out[row] = -1.e10;}
else {out[row] = log(num_fit_temp/den_fit_temp);}
}
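// Loads the mass-lineshape parameters into device globals: mv/ms/mt and gv/gs/gt
// (presumably the masses and widths of the vector, scalar and tensor components)
// plus the c1..c9 coefficients of the S-wave mass shape.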
__global__ void set_mass_params(double *calib_params) {
mv = calib_params[16];
ms = calib_params[17];
mt = calib_params[18];
gv = calib_params[19];
gs = calib_params[20];
gt = calib_params[21];
c1_mass_swave = calib_params[22];
c2_mass_swave = calib_params[23];
c3_mass_swave = calib_params[24];
c4_mass_swave = calib_params[25];
c5_mass_swave = calib_params[26];
c6_mass_swave = calib_params[27];
c7_mass_swave = calib_params[28];
c8_mass_swave = calib_params[29];
c9_mass_swave = calib_params[30];
}
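// Grid scan used to bound a single |Mj1j2|^2 * phasespace mass PDF (selected by
// mpdfid) over the square [750, minthlimit]^2; each thread evaluates one grid
// point, and the maximum is presumably taken on the host for accept-reject
// generation.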
__global__ void find_max_mass_pdf(int mpdfid, int mintnpoints, double minthlimit, double *mpdfarray) {
int mintindex = threadIdx.x + blockDim.x * blockIdx.x;
if (mintindex >= mintnpoints*mintnpoints) { return;}
int im1 = mintindex / mintnpoints;
int im2 = mintindex % mintnpoints;
double mintstep = (minthlimit-750.)/mintnpoints;
double m1_ = 750.+im1*mintstep;
double m2_ = 750.+im2*mintstep;
if (mpdfid == 0) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,0))*phasespace(m1_,m2_);}
else if (mpdfid == 1) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,1))*phasespace(m1_,m2_);}
else if (mpdfid == 2) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,0))*phasespace(m1_,m2_);}
else if (mpdfid == 3) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,2))*phasespace(m1_,m2_);}
else if (mpdfid == 4) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,0))*phasespace(m1_,m2_);}
else if (mpdfid == 5) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,1))*phasespace(m1_,m2_);}
else if (mpdfid == 6) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,2))*phasespace(m1_,m2_);}
else if (mpdfid == 7) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,1))*phasespace(m1_,m2_);}
else if (mpdfid == 8) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,2))*phasespace(m1_,m2_);}
}
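// Hit-or-miss Monte Carlo estimate of a mass integral: each thread throws a
// uniform point (m1, m2, vertical) in the box [750, minthlimit]^2 x [0, maxmpdf]
// and records 1 if the point falls below the selected mass PDF; the host
// presumably converts the hit fraction into an integral.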
__global__ void compute_mass_integral(int mpdfid, int npoints, double minthlimit, double maxmpdf, int *mintarray) {
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= npoints) { return;}
hiprandState_t state;
hiprand_init((unsigned long long)clock(), row, 0, &state);
double m1_ = 750.+hiprand_uniform(&state)*(minthlimit-750.);
double m2_ = 750.+hiprand_uniform(&state)*(minthlimit-750.);
double vertical_ = hiprand_uniform(&state)*maxmpdf;
double mpdf_temp = 0;
if (mpdfid == 0) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,0))*phasespace(m1_,m2_);}
else if (mpdfid == 1) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,1))*phasespace(m1_,m2_);}
else if (mpdfid == 2) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,0))*phasespace(m1_,m2_);}
else if (mpdfid == 3) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,2))*phasespace(m1_,m2_);}
else if (mpdfid == 4) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,0))*phasespace(m1_,m2_);}
else if (mpdfid == 5) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,1))*phasespace(m1_,m2_);}
else if (mpdfid == 6) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,2))*phasespace(m1_,m2_);}
else if (mpdfid == 7) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,1))*phasespace(m1_,m2_);}
else if (mpdfid == 8) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,2))*phasespace(m1_,m2_);}
if (vertical_ <= mpdf_temp) {mintarray[row] = 1;}
else {mintarray[row] = 0;}
}
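// Deterministic 2D integration of the selected mass PDF on a regular grid: each
// thread handles one cell and stores the average of the PDF at the four cell
// corners times the cell area; the host presumably sums mpdfarray to obtain the
// integral.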
__global__ void compute_mint_array(int mpdfid, int mintnpoints, double minthlimit, double *mpdfarray) {
int mintindex = threadIdx.x + blockDim.x * blockIdx.x;
if (mintindex >= mintnpoints*mintnpoints) { return;}
int im1 = mintindex / mintnpoints;
int im2 = mintindex % mintnpoints;
double mintstep = (minthlimit-750.)/mintnpoints;
double m1_ = 750.+im1*mintstep;
double m2_ = 750.+im2*mintstep;
double m1next_ = 750.+(im1+1)*mintstep;
double m2next_ = 750.+(im2+1)*mintstep;
double point1 = 0.;
double point2 = 0.;
double point3 = 0.;
double point4 = 0.;
if (mpdfid == 0) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,0))*phasespace(m1_,m2_);}
else if (mpdfid == 1) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,1))*phasespace(m1_,m2_);}
else if (mpdfid == 2) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,0))*phasespace(m1_,m2_);}
else if (mpdfid == 3) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,2))*phasespace(m1_,m2_);}
else if (mpdfid == 4) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,0))*phasespace(m1_,m2_);}
else if (mpdfid == 5) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,1))*phasespace(m1_,m2_);}
else if (mpdfid == 6) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,2))*phasespace(m1_,m2_);}
else if (mpdfid == 7) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,1))*phasespace(m1_,m2_);}
else if (mpdfid == 8) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,2))*phasespace(m1_,m2_);}
if (mpdfid == 0) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,0,0))*phasespace(m1next_,m2_);}
else if (mpdfid == 1) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,0,1))*phasespace(m1next_,m2_);}
else if (mpdfid == 2) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,1,0))*phasespace(m1next_,m2_);}
else if (mpdfid == 3) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,0,2))*phasespace(m1next_,m2_);}
else if (mpdfid == 4) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,2,0))*phasespace(m1next_,m2_);}
else if (mpdfid == 5) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,1,1))*phasespace(m1next_,m2_);}
else if (mpdfid == 6) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,1,2))*phasespace(m1next_,m2_);}
else if (mpdfid == 7) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,2,1))*phasespace(m1next_,m2_);}
else if (mpdfid == 8) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,2,2))*phasespace(m1next_,m2_);}
if (mpdfid == 0) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,0,0))*phasespace(m1_,m2next_);}
else if (mpdfid == 1) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,0,1))*phasespace(m1_,m2next_);}
else if (mpdfid == 2) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,1,0))*phasespace(m1_,m2next_);}
else if (mpdfid == 3) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,0,2))*phasespace(m1_,m2next_);}
else if (mpdfid == 4) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,2,0))*phasespace(m1_,m2next_);}
else if (mpdfid == 5) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,1,1))*phasespace(m1_,m2next_);}
else if (mpdfid == 6) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,1,2))*phasespace(m1_,m2next_);}
else if (mpdfid == 7) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,2,1))*phasespace(m1_,m2next_);}
else if (mpdfid == 8) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,2,2))*phasespace(m1_,m2next_);}
if (mpdfid == 0) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,0,0))*phasespace(m1next_,m2next_);}
else if (mpdfid == 1) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,0,1))*phasespace(m1next_,m2next_);}
else if (mpdfid == 2) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,1,0))*phasespace(m1next_,m2next_);}
else if (mpdfid == 3) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,0,2))*phasespace(m1next_,m2next_);}
else if (mpdfid == 4) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,2,0))*phasespace(m1next_,m2next_);}
else if (mpdfid == 5) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,1,1))*phasespace(m1next_,m2next_);}
else if (mpdfid == 6) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,1,2))*phasespace(m1next_,m2next_);}
else if (mpdfid == 7) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,2,1))*phasespace(m1next_,m2next_);}
else if (mpdfid == 8) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,2,2))*phasespace(m1next_,m2next_);}
mpdfarray[mintindex] = 0.25*(point1+point2+point3+point4)*mintstep*mintstep;
}
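// Copies the nine normalisation integrals Im_{j1j2} of the mass lineshapes into
// device globals.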
__global__ void set_mass_integrals(double *mass_integrals) {
Im00 = mass_integrals[0];
Im01 = mass_integrals[1];
Im10 = mass_integrals[2];
Im02 = mass_integrals[3];
Im20 = mass_integrals[4];
Im11 = mass_integrals[5];
Im12 = mass_integrals[6];
Im21 = mass_integrals[7];
Im22 = mass_integrals[8];
}
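// Per-MC-event contribution to a normalisation weight for the index combination
// (j1,j2,h,j1p,j2p,hp): the amplitude coefficient times the mass term
// Mj1j2 * conj(Mj1'j2') * phasespace and the angular basis functions selected via
// fjjphhpindexdict / ghhpindexdict, scaled by the MC weight and a factor 100.
// part = 0 returns the real part, otherwise the imaginary part.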
__global__ void compute_nw(double *MCdata, int j1, int j2, int h, int j1p, int j2p, int hp, int part, int NMCevts, double *evout) {
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= NMCevts) { return;}
int i0 = row*6;
double m1_ = MCdata[0 + i0];
double m2_ = MCdata[1 + i0];
double cos1_ = MCdata[2 + i0];
double cos2_ = MCdata[3 + i0];
double phi_ = MCdata[4 + i0];
double weight_ = MCdata[5 + i0];
pycuda::complex<double> ev_nw_temp = Nj1j2hj1pj2php(j1,j2,h,j1p,j2p,hp)*Mj1j2(m1_,m2_,j1,j2)*pycuda::conj(Mj1j2(m1_,m2_,j1p,j2p))*phasespace(m1_,m2_)*fi(cos1_,(int) fjjphhpindexdict[j1][j1p][h][hp])*fi(cos2_,(int) fjjphhpindexdict[j2][j2p][h][hp])*gi(phi_,(int) ghhpindexdict[h][hp]);
if (part == 0) {evout[row] = 100.*weight_*pycuda::real(ev_nw_temp);}
else {evout[row] = 100.*weight_*pycuda::imag(ev_nw_temp);}
}
__global__ void set_nw_val(double nwval, int year_opt, int trig_opt, int inw) {
nw_comp_matrix[year_opt][trig_opt][inw] = nwval;
}
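// Copies the 5-bin cubic-spline coefficients (presumably the decay-time
// acceptance) for each year / trigger category into the corresponding device
// arrays.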
__global__ void set_spline_coefs(double a_2011_L0TIS_mod[][4], double a_2011_L0noTIS_mod[][4], double a_2012_L0TIS_mod[][4], double a_2012_L0noTIS_mod[][4]) {
for (int ibin=0; ibin<5; ++ibin) {
for (int deg=0; deg<4; ++deg) {
a_2011_L0TIS_wide[ibin][deg] = a_2011_L0TIS_mod[ibin][deg];
a_2011_L0noTIS_wide[ibin][deg] = a_2011_L0noTIS_mod[ibin][deg];
a_2012_L0TIS_wide[ibin][deg] = a_2012_L0TIS_mod[ibin][deg];
a_2012_L0noTIS_wide[ibin][deg] = a_2012_L0noTIS_mod[ibin][deg];
}
}
}
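// Covariance matrix of the 336 normalisation weights: one thread per (inw, jnw)
// pair accumulates the sums over events of the per-event contributions and stores
// sum_ij - sum_i * sum_j / N, i.e. the unnormalised covariance of weights inw
// and jnw.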
__global__ void compute_nwcov(double *masterevarray, int numofevts, double nwcovout[][336]) {
int nwcovlinindex = threadIdx.x + blockDim.x * blockIdx.x;
if (nwcovlinindex >= 336*336) { return;}
int inw = nwcovlinindex / 336;
int jnw = nwcovlinindex % 336;
double sumi = 0;
double sumj = 0;
double sumij = 0;
for( int kev = 0; kev < numofevts; kev++ ) {
sumi += masterevarray[inw*numofevts+kev];
sumj += masterevarray[jnw*numofevts+kev];
sumij += masterevarray[inw*numofevts+kev]*masterevarray[jnw*numofevts+kev];
}
nwcovout[inw][jnw] = sumij-sumi*sumj/numofevts;
}
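// Acceptance-weighted mass integrands: real and imaginary parts of
// Mj1j2 * conj(Mj1'j2') * phasespace * accGenMass(m1) * accGenMass(m2).
// real_acc_mint covers the interference (imint 0-20) and diagonal (imint 21-29)
// combinations; imag_acc_mint only the interference terms, since the diagonal
// ones are real.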
__device__ double real_acc_mint(int imint, double ma, double mb) {
if (imint == 0) {return pycuda::real(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,0,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 1) {return pycuda::real(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,0,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 2) {return pycuda::real(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,1,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 3) {return pycuda::real(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 4) {return pycuda::real(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 5) {return pycuda::real(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,0,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 6) {return pycuda::real(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,1,0)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 7) {return pycuda::real(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,1,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 8) {return pycuda::real(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 9) {return pycuda::real(Mj1j2(ma,mb,1,0)*pycuda::conj(Mj1j2(ma,mb,0,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 10) {return pycuda::real(Mj1j2(ma,mb,1,0)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 11) {return pycuda::real(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 12) {return pycuda::real(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,1,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 13) {return pycuda::real(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 14) {return pycuda::real(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,2,0)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 15) {return pycuda::real(Mj1j2(ma,mb,2,0)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 16) {return pycuda::real(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 17) {return pycuda::real(Mj1j2(ma,mb,1,1)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 18) {return pycuda::real(Mj1j2(ma,mb,1,1)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 19) {return pycuda::real(Mj1j2(ma,mb,1,2)*pycuda::conj(Mj1j2(ma,mb,2,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 20) {return pycuda::real(Mj1j2(ma,mb,1,2)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 21) {return pycuda::real(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,0,0)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 22) {return pycuda::real(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,0,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 23) {return pycuda::real(Mj1j2(ma,mb,1,0)*pycuda::conj(Mj1j2(ma,mb,1,0)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 24) {return pycuda::real(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,0,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 25) {return pycuda::real(Mj1j2(ma,mb,2,0)*pycuda::conj(Mj1j2(ma,mb,2,0)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 26) {return pycuda::real(Mj1j2(ma,mb,1,1)*pycuda::conj(Mj1j2(ma,mb,1,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 27) {return pycuda::real(Mj1j2(ma,mb,1,2)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 28) {return pycuda::real(Mj1j2(ma,mb,2,1)*pycuda::conj(Mj1j2(ma,mb,2,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 29) {return pycuda::real(Mj1j2(ma,mb,2,2)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
return 0.;
}
__device__ double imag_acc_mint(int imint, double ma, double mb) {
if (imint == 0) {return pycuda::imag(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,0,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 1) {return pycuda::imag(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,0,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 2) {return pycuda::imag(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,1,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 3) {return pycuda::imag(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 4) {return pycuda::imag(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 5) {return pycuda::imag(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,0,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 6) {return pycuda::imag(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,1,0)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 7) {return pycuda::imag(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,1,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 8) {return pycuda::imag(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 9) {return pycuda::imag(Mj1j2(ma,mb,1,0)*pycuda::conj(Mj1j2(ma,mb,0,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 10) {return pycuda::imag(Mj1j2(ma,mb,1,0)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 11) {return pycuda::imag(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 12) {return pycuda::imag(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,1,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 13) {return pycuda::imag(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 14) {return pycuda::imag(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,2,0)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 15) {return pycuda::imag(Mj1j2(ma,mb,2,0)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 16) {return pycuda::imag(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 17) {return pycuda::imag(Mj1j2(ma,mb,1,1)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 18) {return pycuda::imag(Mj1j2(ma,mb,1,1)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 19) {return pycuda::imag(Mj1j2(ma,mb,1,2)*pycuda::conj(Mj1j2(ma,mb,2,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 20) {return pycuda::imag(Mj1j2(ma,mb,1,2)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
return 0.;
}
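// Same corner-averaged grid integration as compute_mint_array, applied to the
// acceptance-weighted integrands above; part = 0 integrates the real part,
// otherwise the imaginary part.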
__global__ void compute_acc_mint_array(int imint, int part, int mintnpoints, double minthlimit, double *mpdfarray) {
int mintindex = threadIdx.x + blockDim.x * blockIdx.x;
if (mintindex >= mintnpoints*mintnpoints) { return;}
int im1 = mintindex / mintnpoints;
int im2 = mintindex % mintnpoints;
double mintstep = (minthlimit-750.)/mintnpoints;
double m1_ = 750.+im1*mintstep;
double m2_ = 750.+im2*mintstep;
double m1next_ = 750.+(im1+1)*mintstep;
double m2next_ = 750.+(im2+1)*mintstep;
double point1 = 0.;
double point2 = 0.;
double point3 = 0.;
double point4 = 0.;
if (part == 0) {
point1 = real_acc_mint(imint,m1_,m2_);
point2 = real_acc_mint(imint,m1next_,m2_);
point3 = real_acc_mint(imint,m1_,m2next_);
point4 = real_acc_mint(imint,m1next_,m2next_);
}
else {
point1 = imag_acc_mint(imint,m1_,m2_);
point2 = imag_acc_mint(imint,m1next_,m2_);
point3 = imag_acc_mint(imint,m1_,m2next_);
point4 = imag_acc_mint(imint,m1next_,m2next_);
}
mpdfarray[mintindex] = 0.25*(point1+point2+point3+point4)*mintstep*mintstep;
}
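// Loads every precomputed mass integral (Im*, Ih*Re, Ih*Im) into device globals
// and rebuilds the real/imaginary Ih lookup tables, mirroring the integral block
// of set_generator.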
__global__ void set_all_mass_integrals(double *mass_integrals) {
Im00 = mass_integrals[0];
Im01 = mass_integrals[1];
Im10 = mass_integrals[2];
Im02 = mass_integrals[3];
Im20 = mass_integrals[4];
Im11 = mass_integrals[5];
Im12 = mass_integrals[6];
Im21 = mass_integrals[7];
Im22 = mass_integrals[8];
Ih1Re = mass_integrals[9];
Ih2Re = mass_integrals[10];
Ih3Re = mass_integrals[11];
Ih4Re = mass_integrals[12];
Ih5Re = mass_integrals[13];
Ih6Re = mass_integrals[14];
Ih7Re = mass_integrals[15];
Ih8Re = mass_integrals[16];
Ih9Re = mass_integrals[17];
Ih10Re = mass_integrals[18];
Ih11Re = mass_integrals[19];
Ih12Re = mass_integrals[20];
Ih13Re = mass_integrals[21];
Ih14Re = mass_integrals[22];
Ih15Re = mass_integrals[23];
Ih16Re = mass_integrals[24];
Ih17Re = mass_integrals[25];
Ih18Re = mass_integrals[26];
Ih19Re = mass_integrals[27];
Ih20Re = mass_integrals[28];
Ih21Re = mass_integrals[29];
Ih22Re = mass_integrals[30];
Ih23Re = mass_integrals[31];
Ih24Re = mass_integrals[32];
Ih25Re = mass_integrals[33];
Ih26Re = mass_integrals[34];
Ih27Re = mass_integrals[35];
Ih28Re = mass_integrals[36];
Ih29Re = mass_integrals[37];
Ih30Re = mass_integrals[38];
Ih1Im = mass_integrals[39];
Ih2Im = mass_integrals[40];
Ih3Im = mass_integrals[41];
Ih4Im = mass_integrals[42];
Ih5Im = mass_integrals[43];
Ih6Im = mass_integrals[44];
Ih7Im = mass_integrals[45];
Ih8Im = mass_integrals[46];
Ih9Im = mass_integrals[47];
Ih10Im = mass_integrals[48];
Ih11Im = mass_integrals[49];
Ih12Im = mass_integrals[50];
Ih13Im = mass_integrals[51];
Ih14Im = mass_integrals[52];
Ih15Im = mass_integrals[53];
Ih16Im = mass_integrals[54];
Ih17Im = mass_integrals[55];
Ih18Im = mass_integrals[56];
Ih19Im = mass_integrals[57];
Ih20Im = mass_integrals[58];
Ih21Im = mass_integrals[59];
reIhj1j2j1pj2pdict[0][0][0][0] = pycuda::real(pycuda::complex<double>(Ih22Re,0.));
reIhj1j2j1pj2pdict[0][0][0][1] = pycuda::real(pycuda::complex<double>(Ih1Re,Ih1Im));
reIhj1j2j1pj2pdict[0][0][0][2] = pycuda::real(pycuda::complex<double>(Ih2Re,Ih2Im));
reIhj1j2j1pj2pdict[0][0][1][0] = pycuda::real(pycuda::complex<double>(Ih1Re,Ih1Im));
reIhj1j2j1pj2pdict[0][0][1][1] = pycuda::real(pycuda::complex<double>(Ih3Re,Ih3Im));
reIhj1j2j1pj2pdict[0][0][1][2] = pycuda::real(pycuda::complex<double>(Ih4Re,Ih4Im));
reIhj1j2j1pj2pdict[0][0][2][0] = pycuda::real(pycuda::complex<double>(Ih2Re,Ih2Im));
reIhj1j2j1pj2pdict[0][0][2][1] = pycuda::real(pycuda::complex<double>(Ih4Re,Ih4Im));
reIhj1j2j1pj2pdict[0][0][2][2] = pycuda::real(pycuda::complex<double>(Ih5Re,Ih5Im));
reIhj1j2j1pj2pdict[0][1][0][0] = pycuda::real(pycuda::complex<double>(Ih1Re,-Ih1Im));
reIhj1j2j1pj2pdict[0][1][0][1] = pycuda::real(pycuda::complex<double>(Ih23Re,0.));
reIhj1j2j1pj2pdict[0][1][0][2] = pycuda::real(pycuda::complex<double>(Ih6Re,Ih6Im));
reIhj1j2j1pj2pdict[0][1][1][0] = pycuda::real(pycuda::complex<double>(Ih7Re,Ih7Im));
reIhj1j2j1pj2pdict[0][1][1][1] = pycuda::real(pycuda::complex<double>(Ih8Re,Ih8Im));
reIhj1j2j1pj2pdict[0][1][1][2] = pycuda::real(pycuda::complex<double>(Ih9Re,Ih9Im));
reIhj1j2j1pj2pdict[0][1][2][0] = pycuda::real(pycuda::complex<double>(Ih10Re,Ih10Im));
reIhj1j2j1pj2pdict[0][1][2][1] = pycuda::real(pycuda::complex<double>(Ih11Re,Ih11Im));
reIhj1j2j1pj2pdict[0][1][2][2] = pycuda::real(pycuda::complex<double>(Ih12Re,Ih12Im));
reIhj1j2j1pj2pdict[0][2][0][0] = pycuda::real(pycuda::complex<double>(Ih2Re,-Ih2Im));
reIhj1j2j1pj2pdict[0][2][0][1] = pycuda::real(pycuda::complex<double>(Ih6Re,-Ih6Im));
reIhj1j2j1pj2pdict[0][2][0][2] = pycuda::real(pycuda::complex<double>(Ih25Re,0.));
reIhj1j2j1pj2pdict[0][2][1][0] = pycuda::real(pycuda::complex<double>(Ih10Re,-Ih10Im));
reIhj1j2j1pj2pdict[0][2][1][1] = pycuda::real(pycuda::complex<double>(Ih13Re,Ih13Im));
reIhj1j2j1pj2pdict[0][2][1][2] = pycuda::real(pycuda::complex<double>(Ih14Re,Ih14Im));
reIhj1j2j1pj2pdict[0][2][2][0] = pycuda::real(pycuda::complex<double>(Ih15Re,Ih15Im));
reIhj1j2j1pj2pdict[0][2][2][1] = pycuda::real(pycuda::complex<double>(Ih16Re,Ih16Im));
reIhj1j2j1pj2pdict[0][2][2][2] = pycuda::real(pycuda::complex<double>(Ih17Re,Ih17Im));
reIhj1j2j1pj2pdict[1][0][0][0] = pycuda::real(pycuda::complex<double>(Ih1Re,-Ih1Im));
reIhj1j2j1pj2pdict[1][0][0][1] = pycuda::real(pycuda::complex<double>(Ih7Re,Ih7Im));
reIhj1j2j1pj2pdict[1][0][0][2] = pycuda::real(pycuda::complex<double>(Ih10Re,Ih10Im));
reIhj1j2j1pj2pdict[1][0][1][0] = pycuda::real(pycuda::complex<double>(Ih24Re,0.));
reIhj1j2j1pj2pdict[1][0][1][1] = pycuda::real(pycuda::complex<double>(Ih8Re,Ih8Im));
reIhj1j2j1pj2pdict[1][0][1][2] = pycuda::real(pycuda::complex<double>(Ih11Re,Ih11Im));
reIhj1j2j1pj2pdict[1][0][2][0] = pycuda::real(pycuda::complex<double>(Ih6Re,Ih6Im));
reIhj1j2j1pj2pdict[1][0][2][1] = pycuda::real(pycuda::complex<double>(Ih9Re,Ih9Im));
reIhj1j2j1pj2pdict[1][0][2][2] = pycuda::real(pycuda::complex<double>(Ih12Re,Ih12Im));
reIhj1j2j1pj2pdict[1][1][0][0] = pycuda::real(pycuda::complex<double>(Ih3Re,-Ih3Im));
reIhj1j2j1pj2pdict[1][1][0][1] = pycuda::real(pycuda::complex<double>(Ih8Re,-Ih8Im));
reIhj1j2j1pj2pdict[1][1][0][2] = pycuda::real(pycuda::complex<double>(Ih13Re,-Ih13Im));
reIhj1j2j1pj2pdict[1][1][1][0] = pycuda::real(pycuda::complex<double>(Ih8Re,-Ih8Im));
reIhj1j2j1pj2pdict[1][1][1][1] = pycuda::real(pycuda::complex<double>(Ih27Re,0.));
reIhj1j2j1pj2pdict[1][1][1][2] = pycuda::real(pycuda::complex<double>(Ih18Re,Ih18Im));
reIhj1j2j1pj2pdict[1][1][2][0] = pycuda::real(pycuda::complex<double>(Ih13Re,-Ih13Im));
reIhj1j2j1pj2pdict[1][1][2][1] = pycuda::real(pycuda::complex<double>(Ih18Re,Ih18Im));
reIhj1j2j1pj2pdict[1][1][2][2] = pycuda::real(pycuda::complex<double>(Ih19Re,Ih19Im));
reIhj1j2j1pj2pdict[1][2][0][0] = pycuda::real(pycuda::complex<double>(Ih4Re,-Ih4Im));
reIhj1j2j1pj2pdict[1][2][0][1] = pycuda::real(pycuda::complex<double>(Ih9Re,-Ih9Im));
reIhj1j2j1pj2pdict[1][2][0][2] = pycuda::real(pycuda::complex<double>(Ih14Re,-Ih14Im));
reIhj1j2j1pj2pdict[1][2][1][0] = pycuda::real(pycuda::complex<double>(Ih11Re,-Ih11Im));
reIhj1j2j1pj2pdict[1][2][1][1] = pycuda::real(pycuda::complex<double>(Ih18Re,-Ih18Im));
reIhj1j2j1pj2pdict[1][2][1][2] = pycuda::real(pycuda::complex<double>(Ih28Re,0.));
reIhj1j2j1pj2pdict[1][2][2][0] = pycuda::real(pycuda::complex<double>(Ih16Re,-Ih16Im));
reIhj1j2j1pj2pdict[1][2][2][1] = pycuda::real(pycuda::complex<double>(Ih20Re,Ih20Im));
reIhj1j2j1pj2pdict[1][2][2][2] = pycuda::real(pycuda::complex<double>(Ih21Re,Ih21Im));
reIhj1j2j1pj2pdict[2][0][0][0] = pycuda::real(pycuda::complex<double>(Ih2Re,-Ih2Im));
reIhj1j2j1pj2pdict[2][0][0][1] = pycuda::real(pycuda::complex<double>(Ih10Re,-Ih10Im));
reIhj1j2j1pj2pdict[2][0][0][2] = pycuda::real(pycuda::complex<double>(Ih15Re,Ih15Im));
reIhj1j2j1pj2pdict[2][0][1][0] = pycuda::real(pycuda::complex<double>(Ih6Re,-Ih6Im));
reIhj1j2j1pj2pdict[2][0][1][1] = pycuda::real(pycuda::complex<double>(Ih13Re,Ih13Im));
reIhj1j2j1pj2pdict[2][0][1][2] = pycuda::real(pycuda::complex<double>(Ih16Re,Ih16Im));
reIhj1j2j1pj2pdict[2][0][2][0] = pycuda::real(pycuda::complex<double>(Ih26Re,0.));
reIhj1j2j1pj2pdict[2][0][2][1] = pycuda::real(pycuda::complex<double>(Ih14Re,Ih14Im));
reIhj1j2j1pj2pdict[2][0][2][2] = pycuda::real(pycuda::complex<double>(Ih17Re,Ih17Im));
reIhj1j2j1pj2pdict[2][1][0][0] = pycuda::real(pycuda::complex<double>(Ih4Re,-Ih4Im));
reIhj1j2j1pj2pdict[2][1][0][1] = pycuda::real(pycuda::complex<double>(Ih11Re,-Ih11Im));
reIhj1j2j1pj2pdict[2][1][0][2] = pycuda::real(pycuda::complex<double>(Ih16Re,-Ih16Im));
reIhj1j2j1pj2pdict[2][1][1][0] = pycuda::real(pycuda::complex<double>(Ih9Re,-Ih9Im));
reIhj1j2j1pj2pdict[2][1][1][1] = pycuda::real(pycuda::complex<double>(Ih18Re,-Ih18Im));
reIhj1j2j1pj2pdict[2][1][1][2] = pycuda::real(pycuda::complex<double>(Ih20Re,Ih20Im));
reIhj1j2j1pj2pdict[2][1][2][0] = pycuda::real(pycuda::complex<double>(Ih14Re,-Ih14Im));
reIhj1j2j1pj2pdict[2][1][2][1] = pycuda::real(pycuda::complex<double>(Ih29Re,0.));
reIhj1j2j1pj2pdict[2][1][2][2] = pycuda::real(pycuda::complex<double>(Ih21Re,Ih21Im));
reIhj1j2j1pj2pdict[2][2][0][0] = pycuda::real(pycuda::complex<double>(Ih5Re,-Ih5Im));
reIhj1j2j1pj2pdict[2][2][0][1] = pycuda::real(pycuda::complex<double>(Ih12Re,-Ih12Im));
reIhj1j2j1pj2pdict[2][2][0][2] = pycuda::real(pycuda::complex<double>(Ih17Re,-Ih17Im));
reIhj1j2j1pj2pdict[2][2][1][0] = pycuda::real(pycuda::complex<double>(Ih12Re,-Ih12Im));
reIhj1j2j1pj2pdict[2][2][1][1] = pycuda::real(pycuda::complex<double>(Ih19Re,-Ih19Im));
reIhj1j2j1pj2pdict[2][2][1][2] = pycuda::real(pycuda::complex<double>(Ih21Re,-Ih21Im));
reIhj1j2j1pj2pdict[2][2][2][0] = pycuda::real(pycuda::complex<double>(Ih17Re,-Ih17Im));
reIhj1j2j1pj2pdict[2][2][2][1] = pycuda::real(pycuda::complex<double>(Ih21Re,-Ih21Im));
reIhj1j2j1pj2pdict[2][2][2][2] = pycuda::real(pycuda::complex<double>(Ih30Re,0.));
imIhj1j2j1pj2pdict[0][0][0][0] = pycuda::imag(pycuda::complex<double>(Ih22Re,0.));
imIhj1j2j1pj2pdict[0][0][0][1] = pycuda::imag(pycuda::complex<double>(Ih1Re,Ih1Im));
imIhj1j2j1pj2pdict[0][0][0][2] = pycuda::imag(pycuda::complex<double>(Ih2Re,Ih2Im));
imIhj1j2j1pj2pdict[0][0][1][0] = pycuda::imag(pycuda::complex<double>(Ih1Re,Ih1Im));
imIhj1j2j1pj2pdict[0][0][1][1] = pycuda::imag(pycuda::complex<double>(Ih3Re,Ih3Im));
imIhj1j2j1pj2pdict[0][0][1][2] = pycuda::imag(pycuda::complex<double>(Ih4Re,Ih4Im));
imIhj1j2j1pj2pdict[0][0][2][0] = pycuda::imag(pycuda::complex<double>(Ih2Re,Ih2Im));
imIhj1j2j1pj2pdict[0][0][2][1] = pycuda::imag(pycuda::complex<double>(Ih4Re,Ih4Im));
imIhj1j2j1pj2pdict[0][0][2][2] = pycuda::imag(pycuda::complex<double>(Ih5Re,Ih5Im));
imIhj1j2j1pj2pdict[0][1][0][0] = pycuda::imag(pycuda::complex<double>(Ih1Re,-Ih1Im));
imIhj1j2j1pj2pdict[0][1][0][1] = pycuda::imag(pycuda::complex<double>(Ih23Re,0.));
imIhj1j2j1pj2pdict[0][1][0][2] = pycuda::imag(pycuda::complex<double>(Ih6Re,Ih6Im));
imIhj1j2j1pj2pdict[0][1][1][0] = pycuda::imag(pycuda::complex<double>(Ih7Re,Ih7Im));
imIhj1j2j1pj2pdict[0][1][1][1] = pycuda::imag(pycuda::complex<double>(Ih8Re,Ih8Im));
imIhj1j2j1pj2pdict[0][1][1][2] = pycuda::imag(pycuda::complex<double>(Ih9Re,Ih9Im));
imIhj1j2j1pj2pdict[0][1][2][0] = pycuda::imag(pycuda::complex<double>(Ih10Re,Ih10Im));
imIhj1j2j1pj2pdict[0][1][2][1] = pycuda::imag(pycuda::complex<double>(Ih11Re,Ih11Im));
imIhj1j2j1pj2pdict[0][1][2][2] = pycuda::imag(pycuda::complex<double>(Ih12Re,Ih12Im));
imIhj1j2j1pj2pdict[0][2][0][0] = pycuda::imag(pycuda::complex<double>(Ih2Re,-Ih2Im));
imIhj1j2j1pj2pdict[0][2][0][1] = pycuda::imag(pycuda::complex<double>(Ih6Re,-Ih6Im));
imIhj1j2j1pj2pdict[0][2][0][2] = pycuda::imag(pycuda::complex<double>(Ih25Re,0.));
imIhj1j2j1pj2pdict[0][2][1][0] = pycuda::imag(pycuda::complex<double>(Ih10Re,-Ih10Im));
imIhj1j2j1pj2pdict[0][2][1][1] = pycuda::imag(pycuda::complex<double>(Ih13Re,Ih13Im));
imIhj1j2j1pj2pdict[0][2][1][2] = pycuda::imag(pycuda::complex<double>(Ih14Re,Ih14Im));
imIhj1j2j1pj2pdict[0][2][2][0] = pycuda::imag(pycuda::complex<double>(Ih15Re,Ih15Im));
imIhj1j2j1pj2pdict[0][2][2][1] = pycuda::imag(pycuda::complex<double>(Ih16Re,Ih16Im));
imIhj1j2j1pj2pdict[0][2][2][2] = pycuda::imag(pycuda::complex<double>(Ih17Re,Ih17Im));
imIhj1j2j1pj2pdict[1][0][0][0] = pycuda::imag(pycuda::complex<double>(Ih1Re,-Ih1Im));
imIhj1j2j1pj2pdict[1][0][0][1] = pycuda::imag(pycuda::complex<double>(Ih7Re,Ih7Im));
imIhj1j2j1pj2pdict[1][0][0][2] = pycuda::imag(pycuda::complex<double>(Ih10Re,Ih10Im));
imIhj1j2j1pj2pdict[1][0][1][0] = pycuda::imag(pycuda::complex<double>(Ih24Re,0.));
imIhj1j2j1pj2pdict[1][0][1][1] = pycuda::imag(pycuda::complex<double>(Ih8Re,Ih8Im));
imIhj1j2j1pj2pdict[1][0][1][2] = pycuda::imag(pycuda::complex<double>(Ih11Re,Ih11Im));
imIhj1j2j1pj2pdict[1][0][2][0] = pycuda::imag(pycuda::complex<double>(Ih6Re,Ih6Im));
imIhj1j2j1pj2pdict[1][0][2][1] = pycuda::imag(pycuda::complex<double>(Ih9Re,Ih9Im));
imIhj1j2j1pj2pdict[1][0][2][2] = pycuda::imag(pycuda::complex<double>(Ih12Re,Ih12Im));
imIhj1j2j1pj2pdict[1][1][0][0] = pycuda::imag(pycuda::complex<double>(Ih3Re,-Ih3Im));
imIhj1j2j1pj2pdict[1][1][0][1] = pycuda::imag(pycuda::complex<double>(Ih8Re,-Ih8Im));
imIhj1j2j1pj2pdict[1][1][0][2] = pycuda::imag(pycuda::complex<double>(Ih13Re,-Ih13Im));
imIhj1j2j1pj2pdict[1][1][1][0] = pycuda::imag(pycuda::complex<double>(Ih8Re,-Ih8Im));
imIhj1j2j1pj2pdict[1][1][1][1] = pycuda::imag(pycuda::complex<double>(Ih27Re,0.));
imIhj1j2j1pj2pdict[1][1][1][2] = pycuda::imag(pycuda::complex<double>(Ih18Re,Ih18Im));
imIhj1j2j1pj2pdict[1][1][2][0] = pycuda::imag(pycuda::complex<double>(Ih13Re,-Ih13Im));
imIhj1j2j1pj2pdict[1][1][2][1] = pycuda::imag(pycuda::complex<double>(Ih18Re,Ih18Im));
imIhj1j2j1pj2pdict[1][1][2][2] = pycuda::imag(pycuda::complex<double>(Ih19Re,Ih19Im));
imIhj1j2j1pj2pdict[1][2][0][0] = pycuda::imag(pycuda::complex<double>(Ih4Re,-Ih4Im));
imIhj1j2j1pj2pdict[1][2][0][1] = pycuda::imag(pycuda::complex<double>(Ih9Re,-Ih9Im));
imIhj1j2j1pj2pdict[1][2][0][2] = pycuda::imag(pycuda::complex<double>(Ih14Re,-Ih14Im));
imIhj1j2j1pj2pdict[1][2][1][0] = pycuda::imag(pycuda::complex<double>(Ih11Re,-Ih11Im));
imIhj1j2j1pj2pdict[1][2][1][1] = pycuda::imag(pycuda::complex<double>(Ih18Re,-Ih18Im));
imIhj1j2j1pj2pdict[1][2][1][2] = pycuda::imag(pycuda::complex<double>(Ih28Re,0.));
imIhj1j2j1pj2pdict[1][2][2][0] = pycuda::imag(pycuda::complex<double>(Ih16Re,-Ih16Im));
imIhj1j2j1pj2pdict[1][2][2][1] = pycuda::imag(pycuda::complex<double>(Ih20Re,Ih20Im));
imIhj1j2j1pj2pdict[1][2][2][2] = pycuda::imag(pycuda::complex<double>(Ih21Re,Ih21Im));
imIhj1j2j1pj2pdict[2][0][0][0] = pycuda::imag(pycuda::complex<double>(Ih2Re,-Ih2Im));
imIhj1j2j1pj2pdict[2][0][0][1] = pycuda::imag(pycuda::complex<double>(Ih10Re,-Ih10Im));
imIhj1j2j1pj2pdict[2][0][0][2] = pycuda::imag(pycuda::complex<double>(Ih15Re,Ih15Im));
imIhj1j2j1pj2pdict[2][0][1][0] = pycuda::imag(pycuda::complex<double>(Ih6Re,-Ih6Im));
imIhj1j2j1pj2pdict[2][0][1][1] = pycuda::imag(pycuda::complex<double>(Ih13Re,Ih13Im));
imIhj1j2j1pj2pdict[2][0][1][2] = pycuda::imag(pycuda::complex<double>(Ih16Re,Ih16Im));
imIhj1j2j1pj2pdict[2][0][2][0] = pycuda::imag(pycuda::complex<double>(Ih26Re,0.));
imIhj1j2j1pj2pdict[2][0][2][1] = pycuda::imag(pycuda::complex<double>(Ih14Re,Ih14Im));
imIhj1j2j1pj2pdict[2][0][2][2] = pycuda::imag(pycuda::complex<double>(Ih17Re,Ih17Im));
imIhj1j2j1pj2pdict[2][1][0][0] = pycuda::imag(pycuda::complex<double>(Ih4Re,-Ih4Im));
imIhj1j2j1pj2pdict[2][1][0][1] = pycuda::imag(pycuda::complex<double>(Ih11Re,-Ih11Im));
imIhj1j2j1pj2pdict[2][1][0][2] = pycuda::imag(pycuda::complex<double>(Ih16Re,-Ih16Im));
imIhj1j2j1pj2pdict[2][1][1][0] = pycuda::imag(pycuda::complex<double>(Ih9Re,-Ih9Im));
imIhj1j2j1pj2pdict[2][1][1][1] = pycuda::imag(pycuda::complex<double>(Ih18Re,-Ih18Im));
imIhj1j2j1pj2pdict[2][1][1][2] = pycuda::imag(pycuda::complex<double>(Ih20Re,Ih20Im));
imIhj1j2j1pj2pdict[2][1][2][0] = pycuda::imag(pycuda::complex<double>(Ih14Re,-Ih14Im));
imIhj1j2j1pj2pdict[2][1][2][1] = pycuda::imag(pycuda::complex<double>(Ih29Re,0.));
imIhj1j2j1pj2pdict[2][1][2][2] = pycuda::imag(pycuda::complex<double>(Ih21Re,Ih21Im));
imIhj1j2j1pj2pdict[2][2][0][0] = pycuda::imag(pycuda::complex<double>(Ih5Re,-Ih5Im));
imIhj1j2j1pj2pdict[2][2][0][1] = pycuda::imag(pycuda::complex<double>(Ih12Re,-Ih12Im));
imIhj1j2j1pj2pdict[2][2][0][2] = pycuda::imag(pycuda::complex<double>(Ih17Re,-Ih17Im));
imIhj1j2j1pj2pdict[2][2][1][0] = pycuda::imag(pycuda::complex<double>(Ih12Re,-Ih12Im));
imIhj1j2j1pj2pdict[2][2][1][1] = pycuda::imag(pycuda::complex<double>(Ih19Re,-Ih19Im));
imIhj1j2j1pj2pdict[2][2][1][2] = pycuda::imag(pycuda::complex<double>(Ih21Re,-Ih21Im));
imIhj1j2j1pj2pdict[2][2][2][0] = pycuda::imag(pycuda::complex<double>(Ih17Re,-Ih17Im));
imIhj1j2j1pj2pdict[2][2][2][1] = pycuda::imag(pycuda::complex<double>(Ih21Re,-Ih21Im));
imIhj1j2j1pj2pdict[2][2][2][2] = pycuda::imag(pycuda::complex<double>(Ih30Re,0.));
}
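// Loads the per-event mass integrals into the Im* buffers and precomputes, for MC event iev,
// the decay-time integrals over [0,inf) (cosh, sinh, cos and sin terms), the angular basis
// functions fi/gi, the mass amplitudes Mj1j2 and the phase-space factor used by the
// reweighting functions below.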
__device__ void set_buffer_rew_terms(double *mass_integrals, int iev) {
Im00 = mass_integrals[0];
Im01 = mass_integrals[1];
Im10 = mass_integrals[2];
Im02 = mass_integrals[3];
Im20 = mass_integrals[4];
Im11 = mass_integrals[5];
Im12 = mass_integrals[6];
Im21 = mass_integrals[7];
Im22 = mass_integrals[8];
IT_cosh_MCrew[iev] = 4.*gamma_Bs_freq/(-pow(delta_gamma_freq,2) + 4.*pow(gamma_Bs_freq,2));
IT_sinh_MCrew[iev] = 2.*delta_gamma_freq/(-pow(delta_gamma_freq,2) + 4.*pow(gamma_Bs_freq,2));
IT_cos_MCrew[iev] = gamma_Bs_freq/(pow(delta_m_freq,2) + pow(gamma_Bs_freq,2));
IT_sin_MCrew[iev] = delta_m_freq/(pow(delta_m_freq,2) + pow(gamma_Bs_freq,2));
for (int i=0; i<18; ++i) {fi_cos1_MCrew[i][iev] = fi(cos1_MCrew[iev],i+1);}
for (int i=0; i<18; ++i) {fi_cos2_MCrew[i][iev] = fi(cos2_MCrew[iev],i+1);}
for (int i=0; i<15; ++i) {gi_MCrew[i][iev] = gi(phi_MCrew[iev],i+1);}
for (int j1=0; j1<3; ++j1) {
for (int j2=0; j2<3; ++j2) {
pycuda::complex<double> M_temp = Mj1j2(m1_MCrew[iev],m2_MCrew[iev],j1,j2);
reMj1j2_MCrew[j1][j2][iev] = pycuda::real(M_temp);
imMj1j2_MCrew[j1][j2][iev] = pycuda::imag(M_temp);
}
}
phasespace_MCrew[iev] = phasespace(m1_MCrew[iev],m2_MCrew[iev]);
}
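// Mass-dependent interference term M_{j1j2} * conj(M_{j1'j2'}) weighted by the two-body phase
// space, followed by accessors that map (j,j',h,h') onto the precomputed fi/gi angular
// functions through the index tables filled in initialize().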
__device__ pycuda::complex<double> hj1j2j1pj2p_MCrew(int j1, int j2, int j1p, int j2p, int iev) {
return Mj1j2_MCrew(j1,j2,iev)*pycuda::conj(Mj1j2_MCrew(j1p,j2p,iev))*phasespace_MCrew[iev];
}
__device__ double fjjphhp_cos1_MCrew(int j, int jp, int h, int hp, int iev) {
return fi_cos1_MCrew[(int) fjjphhpindexdict[j][jp][h][hp]-1][iev];
}
__device__ double fjjphhp_cos2_MCrew(int j, int jp, int h, int hp, int iev) {
return fi_cos2_MCrew[(int) fjjphhpindexdict[j][jp][h][hp]-1][iev];
}
__device__ double ghhp_phi_MCrew(int h, int hp, int iev) {
return gi_MCrew[(int) ghhpindexdict[h][hp]-1][iev];
}
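// One (j1,j2,h) x (j1',j2',h') component of the time-integrated decay rate used for the MC
// reweighting: per-event time integrals combined with the amplitude bilinears (with the
// production asymmetry DCP_prod multiplying the CP-violating terms), times the normalisation
// constant N, the mass/phase-space term h, and the angular functions g and f.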
__device__ double comp_rew_phys_model(int j1, int j2, int h, int j1p, int j2p, int hp, int iev) {
return pycuda::real(((IT_cosh_MCrew[iev]*M_Average(j1,j2,h,j1p,j2p,hp)-IT_sinh_MCrew[iev]*M_DeltaGamma(j1,j2,h,j1p,j2p,hp))+DCP_prod*(IT_cos_MCrew[iev]*M_DirCP(j1,j2,h,j1p,j2p,hp)+IT_sin_MCrew[iev]*M_MixCP(j1,j2,h,j1p,j2p,hp)))*Nj1j2hj1pj2php(j1,j2,h,j1p,j2p,hp)*hj1j2j1pj2p_MCrew(j1,j2,j1p,j2p,iev))*ghhp_phi_MCrew(h,hp,iev)*fjjphhp_cos1_MCrew(j1,j1p,h,hp,iev)*fjjphhp_cos2_MCrew(j2,j2p,h,hp,iev);
}
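// Full reweighting model: sum over all contributing (j1,j2,h | j1',j2',h') combinations.
// Diagonal terms enter once; interference terms enter with a factor 2 because only one
// ordering of the primed and unprimed indices is listed.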
__device__ double rew_phys_model(int iev) {
return comp_rew_phys_model(0,0,0,0,0,0,iev)+comp_rew_phys_model(0,1,0,0,1,0,iev)+comp_rew_phys_model(0,2,0,0,2,0,iev)+comp_rew_phys_model(1,0,0,1,0,0,iev)+comp_rew_phys_model(1,1,0,1,1,0,iev)+comp_rew_phys_model(1,1,1,1,1,1,iev)
+comp_rew_phys_model(1,1,2,1,1,2,iev)+comp_rew_phys_model(1,2,0,1,2,0,iev)+comp_rew_phys_model(1,2,1,1,2,1,iev)+comp_rew_phys_model(1,2,2,1,2,2,iev)+comp_rew_phys_model(2,0,0,2,0,0,iev)+comp_rew_phys_model(2,1,0,2,1,0,iev)+comp_rew_phys_model(2,1,1,2,1,1,iev)
+comp_rew_phys_model(2,1,2,2,1,2,iev)+comp_rew_phys_model(2,2,0,2,2,0,iev)+comp_rew_phys_model(2,2,1,2,2,1,iev)+comp_rew_phys_model(2,2,2,2,2,2,iev)+comp_rew_phys_model(2,2,3,2,2,3,iev)+comp_rew_phys_model(2,2,4,2,2,4,iev)+2.*comp_rew_phys_model(0,1,0,0,0,0,iev)
+2.*comp_rew_phys_model(0,1,0,1,0,0,iev)+2.*comp_rew_phys_model(0,1,0,2,0,0,iev)+2.*comp_rew_phys_model(0,2,0,0,0,0,iev)+2.*comp_rew_phys_model(0,2,0,0,1,0,iev)+2.*comp_rew_phys_model(0,2,0,1,0,0,iev)+2.*comp_rew_phys_model(0,2,0,1,1,0,iev)
+2.*comp_rew_phys_model(0,2,0,2,0,0,iev)+2.*comp_rew_phys_model(0,2,0,2,1,0,iev)+2.*comp_rew_phys_model(1,0,0,0,0,0,iev)+2.*comp_rew_phys_model(1,1,0,0,0,0,iev)+2.*comp_rew_phys_model(1,1,0,0,1,0,iev)+2.*comp_rew_phys_model(1,1,0,1,0,0,iev)
+2.*comp_rew_phys_model(1,1,0,2,0,0,iev)+2.*comp_rew_phys_model(1,1,1,0,0,0,iev)+2.*comp_rew_phys_model(1,1,1,0,1,0,iev)+2.*comp_rew_phys_model(1,1,1,0,2,0,iev)+2.*comp_rew_phys_model(1,1,1,1,0,0,iev)+2.*comp_rew_phys_model(1,1,1,1,1,0,iev)
+2.*comp_rew_phys_model(1,1,1,1,2,0,iev)+2.*comp_rew_phys_model(1,1,1,2,0,0,iev)+2.*comp_rew_phys_model(1,1,1,2,1,0,iev)+2.*comp_rew_phys_model(1,1,1,2,2,0,iev)+2.*comp_rew_phys_model(1,1,2,0,0,0,iev)+2.*comp_rew_phys_model(1,1,2,0,1,0,iev)
+2.*comp_rew_phys_model(1,1,2,0,2,0,iev)+2.*comp_rew_phys_model(1,1,2,1,0,0,iev)+2.*comp_rew_phys_model(1,1,2,1,1,0,iev)+2.*comp_rew_phys_model(1,1,2,1,1,1,iev)+2.*comp_rew_phys_model(1,1,2,1,2,0,iev)+2.*comp_rew_phys_model(1,1,2,1,2,1,iev)
+2.*comp_rew_phys_model(1,1,2,2,0,0,iev)+2.*comp_rew_phys_model(1,1,2,2,1,0,iev)+2.*comp_rew_phys_model(1,1,2,2,1,1,iev)+2.*comp_rew_phys_model(1,1,2,2,2,0,iev)+2.*comp_rew_phys_model(1,1,2,2,2,1,iev)+2.*comp_rew_phys_model(1,2,0,0,0,0,iev)
+2.*comp_rew_phys_model(1,2,0,0,1,0,iev)+2.*comp_rew_phys_model(1,2,0,0,2,0,iev)+2.*comp_rew_phys_model(1,2,0,1,0,0,iev)+2.*comp_rew_phys_model(1,2,0,1,1,0,iev)+2.*comp_rew_phys_model(1,2,0,2,0,0,iev)+2.*comp_rew_phys_model(1,2,0,2,1,0,iev)
+2.*comp_rew_phys_model(1,2,1,0,0,0,iev)+2.*comp_rew_phys_model(1,2,1,0,1,0,iev)+2.*comp_rew_phys_model(1,2,1,0,2,0,iev)+2.*comp_rew_phys_model(1,2,1,1,0,0,iev)+2.*comp_rew_phys_model(1,2,1,1,1,0,iev)+2.*comp_rew_phys_model(1,2,1,1,1,1,iev)
+2.*comp_rew_phys_model(1,2,1,1,2,0,iev)+2.*comp_rew_phys_model(1,2,1,2,0,0,iev)+2.*comp_rew_phys_model(1,2,1,2,1,0,iev)+2.*comp_rew_phys_model(1,2,1,2,1,1,iev)+2.*comp_rew_phys_model(1,2,1,2,2,0,iev)+2.*comp_rew_phys_model(1,2,2,0,0,0,iev)
+2.*comp_rew_phys_model(1,2,2,0,1,0,iev)+2.*comp_rew_phys_model(1,2,2,0,2,0,iev)+2.*comp_rew_phys_model(1,2,2,1,0,0,iev)+2.*comp_rew_phys_model(1,2,2,1,1,0,iev)+2.*comp_rew_phys_model(1,2,2,1,1,1,iev)+2.*comp_rew_phys_model(1,2,2,1,1,2,iev)
+2.*comp_rew_phys_model(1,2,2,1,2,0,iev)+2.*comp_rew_phys_model(1,2,2,1,2,1,iev)+2.*comp_rew_phys_model(1,2,2,2,0,0,iev)+2.*comp_rew_phys_model(1,2,2,2,1,0,iev)+2.*comp_rew_phys_model(1,2,2,2,1,1,iev)+2.*comp_rew_phys_model(1,2,2,2,1,2,iev)
+2.*comp_rew_phys_model(1,2,2,2,2,0,iev)+2.*comp_rew_phys_model(1,2,2,2,2,1,iev)+2.*comp_rew_phys_model(2,0,0,0,0,0,iev)+2.*comp_rew_phys_model(2,0,0,1,0,0,iev)+2.*comp_rew_phys_model(2,1,0,0,0,0,iev)+2.*comp_rew_phys_model(2,1,0,0,1,0,iev)
+2.*comp_rew_phys_model(2,1,0,1,0,0,iev)+2.*comp_rew_phys_model(2,1,0,1,1,0,iev)+2.*comp_rew_phys_model(2,1,0,2,0,0,iev)+2.*comp_rew_phys_model(2,1,1,0,0,0,iev)+2.*comp_rew_phys_model(2,1,1,0,1,0,iev)+2.*comp_rew_phys_model(2,1,1,0,2,0,iev)
+2.*comp_rew_phys_model(2,1,1,1,0,0,iev)+2.*comp_rew_phys_model(2,1,1,1,1,0,iev)+2.*comp_rew_phys_model(2,1,1,1,1,1,iev)+2.*comp_rew_phys_model(2,1,1,1,2,0,iev)+2.*comp_rew_phys_model(2,1,1,2,0,0,iev)+2.*comp_rew_phys_model(2,1,1,2,1,0,iev)
+2.*comp_rew_phys_model(2,1,1,2,2,0,iev)+2.*comp_rew_phys_model(2,1,2,0,0,0,iev)+2.*comp_rew_phys_model(2,1,2,0,1,0,iev)+2.*comp_rew_phys_model(2,1,2,0,2,0,iev)+2.*comp_rew_phys_model(2,1,2,1,0,0,iev)+2.*comp_rew_phys_model(2,1,2,1,1,0,iev)
+2.*comp_rew_phys_model(2,1,2,1,1,1,iev)+2.*comp_rew_phys_model(2,1,2,1,1,2,iev)+2.*comp_rew_phys_model(2,1,2,1,2,0,iev)+2.*comp_rew_phys_model(2,1,2,1,2,1,iev)+2.*comp_rew_phys_model(2,1,2,2,0,0,iev)+2.*comp_rew_phys_model(2,1,2,2,1,0,iev)
+2.*comp_rew_phys_model(2,1,2,2,1,1,iev)+2.*comp_rew_phys_model(2,1,2,2,2,0,iev)+2.*comp_rew_phys_model(2,1,2,2,2,1,iev)+2.*comp_rew_phys_model(2,2,0,0,0,0,iev)+2.*comp_rew_phys_model(2,2,0,0,1,0,iev)+2.*comp_rew_phys_model(2,2,0,0,2,0,iev)
+2.*comp_rew_phys_model(2,2,0,1,0,0,iev)+2.*comp_rew_phys_model(2,2,0,1,1,0,iev)+2.*comp_rew_phys_model(2,2,0,1,2,0,iev)+2.*comp_rew_phys_model(2,2,0,2,0,0,iev)+2.*comp_rew_phys_model(2,2,0,2,1,0,iev)+2.*comp_rew_phys_model(2,2,1,0,0,0,iev)
+2.*comp_rew_phys_model(2,2,1,0,1,0,iev)+2.*comp_rew_phys_model(2,2,1,0,2,0,iev)+2.*comp_rew_phys_model(2,2,1,1,0,0,iev)+2.*comp_rew_phys_model(2,2,1,1,1,0,iev)+2.*comp_rew_phys_model(2,2,1,1,1,1,iev)+2.*comp_rew_phys_model(2,2,1,1,2,0,iev)
+2.*comp_rew_phys_model(2,2,1,1,2,1,iev)+2.*comp_rew_phys_model(2,2,1,2,0,0,iev)+2.*comp_rew_phys_model(2,2,1,2,1,0,iev)+2.*comp_rew_phys_model(2,2,1,2,1,1,iev)+2.*comp_rew_phys_model(2,2,1,2,2,0,iev)+2.*comp_rew_phys_model(2,2,2,0,0,0,iev)
+2.*comp_rew_phys_model(2,2,2,0,1,0,iev)+2.*comp_rew_phys_model(2,2,2,0,2,0,iev)+2.*comp_rew_phys_model(2,2,2,1,0,0,iev)+2.*comp_rew_phys_model(2,2,2,1,1,0,iev)+2.*comp_rew_phys_model(2,2,2,1,1,1,iev)+2.*comp_rew_phys_model(2,2,2,1,1,2,iev)
+2.*comp_rew_phys_model(2,2,2,1,2,0,iev)+2.*comp_rew_phys_model(2,2,2,1,2,1,iev)+2.*comp_rew_phys_model(2,2,2,1,2,2,iev)+2.*comp_rew_phys_model(2,2,2,2,0,0,iev)+2.*comp_rew_phys_model(2,2,2,2,1,0,iev)+2.*comp_rew_phys_model(2,2,2,2,1,1,iev)
+2.*comp_rew_phys_model(2,2,2,2,1,2,iev)+2.*comp_rew_phys_model(2,2,2,2,2,0,iev)+2.*comp_rew_phys_model(2,2,2,2,2,1,iev)+2.*comp_rew_phys_model(2,2,3,0,0,0,iev)+2.*comp_rew_phys_model(2,2,3,0,1,0,iev)+2.*comp_rew_phys_model(2,2,3,0,2,0,iev)
+2.*comp_rew_phys_model(2,2,3,1,0,0,iev)+2.*comp_rew_phys_model(2,2,3,1,1,0,iev)+2.*comp_rew_phys_model(2,2,3,1,1,1,iev)+2.*comp_rew_phys_model(2,2,3,1,1,2,iev)+2.*comp_rew_phys_model(2,2,3,1,2,0,iev)+2.*comp_rew_phys_model(2,2,3,1,2,1,iev)
+2.*comp_rew_phys_model(2,2,3,1,2,2,iev)+2.*comp_rew_phys_model(2,2,3,2,0,0,iev)+2.*comp_rew_phys_model(2,2,3,2,1,0,iev)+2.*comp_rew_phys_model(2,2,3,2,1,1,iev)+2.*comp_rew_phys_model(2,2,3,2,1,2,iev)+2.*comp_rew_phys_model(2,2,3,2,2,0,iev)
+2.*comp_rew_phys_model(2,2,3,2,2,1,iev)+2.*comp_rew_phys_model(2,2,3,2,2,2,iev)+2.*comp_rew_phys_model(2,2,4,0,0,0,iev)+2.*comp_rew_phys_model(2,2,4,0,1,0,iev)+2.*comp_rew_phys_model(2,2,4,0,2,0,iev)+2.*comp_rew_phys_model(2,2,4,1,0,0,iev)
+2.*comp_rew_phys_model(2,2,4,1,1,0,iev)+2.*comp_rew_phys_model(2,2,4,1,1,1,iev)+2.*comp_rew_phys_model(2,2,4,1,1,2,iev)+2.*comp_rew_phys_model(2,2,4,1,2,0,iev)+2.*comp_rew_phys_model(2,2,4,1,2,1,iev)+2.*comp_rew_phys_model(2,2,4,1,2,2,iev)
+2.*comp_rew_phys_model(2,2,4,2,0,0,iev)+2.*comp_rew_phys_model(2,2,4,2,1,0,iev)+2.*comp_rew_phys_model(2,2,4,2,1,1,iev)+2.*comp_rew_phys_model(2,2,4,2,1,2,iev)+2.*comp_rew_phys_model(2,2,4,2,2,0,iev)+2.*comp_rew_phys_model(2,2,4,2,2,1,iev)
+2.*comp_rew_phys_model(2,2,4,2,2,2,iev)+2.*comp_rew_phys_model(2,2,4,2,2,3,iev);
}
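// One thread per MC event: unpack the six per-event columns (m1, m2, cos1, cos2, phi and the
// inverse generation weight), refresh the amplitude and reweighting buffers, and store the
// physics weight scaled by 1e4.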
__global__ void compute_phys_weight(double *MCdata, double *out, double *re_amps, double *dirCP_asyms, double *im_amps, double *weak_phases, double *mixing_params, double *calib_params, double *mass_integrals, int NMCevts) {
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= NMCevts) { return;}
int i0 = row*6;
m1_MCrew[row] = MCdata[0 + i0];
m2_MCrew[row] = MCdata[1 + i0];
cos1_MCrew[row] = MCdata[2 + i0];
cos2_MCrew[row] = MCdata[3 + i0];
phi_MCrew[row] = MCdata[4 + i0];
double weight_invgen = MCdata[5 + i0];
set_buffer_amplitudes(re_amps,dirCP_asyms,im_amps,weak_phases,mixing_params,calib_params);
set_buffer_rew_terms(mass_integrals,row);
double weight_phys = rew_phys_model(row);
out[row] = 1.e4*weight_invgen*weight_phys;
}
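// A minimal host-side launch sketch (hypothetical names; assumes the usual PyCUDA API):
// func = mod.get_function("compute_phys_weight")
// func(MCdata_gpu, out_gpu, re_amps_gpu, dirCP_gpu, im_amps_gpu, phases_gpu, mixing_gpu,
// calib_gpu, mass_int_gpu, numpy.int32(NMCevts),
// block=(256,1,1), grid=((NMCevts+255)//256,1))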
}
| 859761197ad3bb57962188646867b1d8106b5d21.cu |
#include <math.h>
#include <stdio.h>
#include <pycuda-complex.hpp>
#include<curand.h>
#include<curand_kernel.h>
extern "C" {
// ##########################################
// Global variables
#define pi 3.14159265359
#define MPion 139.57018
#define MKaon 493.667
#define MKst_1_1410 1414.
#define GKst_1_1410 232.
#define MKst_1_1680 1717.
#define GKst_1_1680 322.
#define MBs 5366.77
__device__ double DCP_prod;
__device__ double MEta = 547.;
__device__ double sAdler_Stheo = 0.236;
__device__ double B0_Stheo = 0.411;
__device__ double B1_Stheo = 0.162;
__device__ double alpha_Stheo = 1.15;
__device__ double s0_Stheo = 1.21;
__device__ double phi0_Stheo = -0.19;
__device__ double phi1_Stheo = 5.03;
__device__ double a_Stheo = -5.2;
__device__ double b_Stheo = 7.6;
__device__ double c_Stheo = -1.73;
__device__ double sqrtsr1_Stheo = 1.401;
__device__ double sqrtsr2_Stheo = 1.817;
__device__ double e1_Stheo = 1.;
__device__ double e2_Stheo = 0.184;
__device__ double G1_Stheo = 0.497;
__device__ double G2_Stheo = 0.28;
__device__ double s_Kpi_palano = (139.57018*139.57018+493.667*493.667)/1000./1000.;
__device__ double s_A_palano = 0.87753*(139.57018*139.57018+493.667*493.667)/1000./1000.;
__device__ double s_a_palano = 1.7991;
__device__ double g_1_a_palano = 0.3139;
__device__ double g_2_a_palano = -0.00775;
__device__ double s_b_palano = 8.3627;
__device__ double g_1_b_palano = 1.1804;
__device__ double g_2_b_palano = -0.22335;
__device__ double C_11_0_palano = -0.1553;
__device__ double C_11_1_palano = 0.0909;
__device__ double C_11_2_palano = 0.8618;
__device__ double C_11_3_palano = 0.0629;
__device__ double C_12_0_palano = 0.0738;
__device__ double C_12_1_palano = 0.3866;
__device__ double C_12_2_palano = 1.2195;
__device__ double C_12_3_palano = 0.8390;
__device__ double C_22_0_palano = -0.0036;
__device__ double C_22_1_palano = 0.2590;
__device__ double C_22_2_palano = 1.6950;
__device__ double C_22_3_palano = 2.2300;
__device__ double A_1_0_palano = 1.;
__device__ double A_1_1_palano = 0.00491636810678;
__device__ double A_1_2_palano = 2.12489529189;
__device__ double A_1_3_palano = 0.56004179484;
__device__ double A_1_4_palano = 0.;
__device__ double A_2_0_palano = -4.20943829183;
__device__ double A_2_1_palano = -1.2110147687;
__device__ double A_2_2_palano = 2.28474898994;
__device__ double A_2_3_palano = 5.93332582489;
__device__ double A_2_4_palano = 0.;
// ##########################################
// Auxiliary elements
__device__ double fjjphhpindexdict[3][3][5][5];
__device__ double ghhpindexdict[5][5];
__device__ double reNj1j2hdict[3][3][5];
__device__ double imNj1j2hdict[3][3][5];
__device__ double spl_knot_vector[2][2][6];
__device__ pycuda::complex<double> Nj1j2hdict(int j1, int j2, int h) {
return pycuda::complex<double>(reNj1j2hdict[j1][j2][h],imNj1j2hdict[j1][j2][h]);
}
// ##########################################
// Initializer
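// Fills the constant lookup tables: fjjphhpindexdict and ghhpindexdict map (j,j',h,h')
// combinations onto the fi/gi angular-function indices, and reNj1j2hdict/imNj1j2hdict hold the
// normalisation constants N_{j1 j2}^{h} returned by Nj1j2hdict above.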
__global__ void initialize() {
fjjphhpindexdict[0][0][0][0] = 1;
fjjphhpindexdict[0][1][0][0] = 2;
fjjphhpindexdict[0][1][0][1] = 3;
fjjphhpindexdict[0][1][0][2] = 3;
fjjphhpindexdict[0][2][0][0] = 4;
fjjphhpindexdict[0][2][0][1] = 5;
fjjphhpindexdict[0][2][0][2] = 5;
fjjphhpindexdict[1][0][0][0] = 2;
fjjphhpindexdict[1][1][0][0] = 6;
fjjphhpindexdict[1][1][0][1] = 5;
fjjphhpindexdict[1][1][0][2] = 5;
fjjphhpindexdict[1][2][0][0] = 7;
fjjphhpindexdict[1][2][0][1] = 8;
fjjphhpindexdict[1][2][0][2] = 8;
fjjphhpindexdict[1][0][1][0] = 3;
fjjphhpindexdict[1][0][2][0] = 3;
fjjphhpindexdict[1][1][1][0] = 5;
fjjphhpindexdict[1][1][2][0] = 5;
fjjphhpindexdict[1][1][1][1] = 9;
fjjphhpindexdict[1][1][1][2] = 9;
fjjphhpindexdict[1][1][2][1] = 9;
fjjphhpindexdict[1][1][2][2] = 9;
fjjphhpindexdict[1][2][1][0] = 10;
fjjphhpindexdict[1][2][2][0] = 10;
fjjphhpindexdict[1][2][1][1] = 11;
fjjphhpindexdict[1][2][1][2] = 11;
fjjphhpindexdict[1][2][2][1] = 11;
fjjphhpindexdict[1][2][2][2] = 11;
fjjphhpindexdict[2][0][0][0] = 4;
fjjphhpindexdict[2][1][0][0] = 7;
fjjphhpindexdict[2][1][0][1] = 10;
fjjphhpindexdict[2][1][0][2] = 10;
fjjphhpindexdict[2][2][0][0] = 12;
fjjphhpindexdict[2][2][0][1] = 13;
fjjphhpindexdict[2][2][0][2] = 13;
fjjphhpindexdict[2][0][1][0] = 5;
fjjphhpindexdict[2][0][2][0] = 5;
fjjphhpindexdict[2][1][1][0] = 8;
fjjphhpindexdict[2][1][2][0] = 8;
fjjphhpindexdict[2][1][1][1] = 11;
fjjphhpindexdict[2][1][1][2] = 11;
fjjphhpindexdict[2][1][2][1] = 11;
fjjphhpindexdict[2][1][2][2] = 11;
fjjphhpindexdict[2][2][1][0] = 13;
fjjphhpindexdict[2][2][2][0] = 13;
fjjphhpindexdict[2][2][1][1] = 14;
fjjphhpindexdict[2][2][1][2] = 14;
fjjphhpindexdict[2][2][2][1] = 14;
fjjphhpindexdict[2][2][2][2] = 14;
fjjphhpindexdict[0][2][0][3] = 9;
fjjphhpindexdict[0][2][0][4] = 9;
fjjphhpindexdict[1][2][0][3] = 11;
fjjphhpindexdict[1][2][0][4] = 11;
fjjphhpindexdict[1][2][1][3] = 15;
fjjphhpindexdict[1][2][1][4] = 15;
fjjphhpindexdict[1][2][2][3] = 15;
fjjphhpindexdict[1][2][2][4] = 15;
fjjphhpindexdict[2][2][0][3] = 16;
fjjphhpindexdict[2][2][0][4] = 16;
fjjphhpindexdict[2][2][1][3] = 17;
fjjphhpindexdict[2][2][1][4] = 17;
fjjphhpindexdict[2][2][2][3] = 17;
fjjphhpindexdict[2][2][2][4] = 17;
fjjphhpindexdict[2][0][3][0] = 9;
fjjphhpindexdict[2][0][4][0] = 9;
fjjphhpindexdict[2][1][3][0] = 11;
fjjphhpindexdict[2][1][4][0] = 11;
fjjphhpindexdict[2][1][3][1] = 15;
fjjphhpindexdict[2][1][3][2] = 15;
fjjphhpindexdict[2][1][4][1] = 15;
fjjphhpindexdict[2][1][4][2] = 15;
fjjphhpindexdict[2][2][3][0] = 16;
fjjphhpindexdict[2][2][4][0] = 16;
fjjphhpindexdict[2][2][3][1] = 17;
fjjphhpindexdict[2][2][3][2] = 17;
fjjphhpindexdict[2][2][4][1] = 17;
fjjphhpindexdict[2][2][4][2] = 17;
fjjphhpindexdict[2][2][3][3] = 18;
fjjphhpindexdict[2][2][3][4] = 18;
fjjphhpindexdict[2][2][4][3] = 18;
fjjphhpindexdict[2][2][4][4] = 18;
ghhpindexdict[0][0] = 1;
ghhpindexdict[0][1] = 2;
ghhpindexdict[0][2] = 3;
ghhpindexdict[1][0] = 2;
ghhpindexdict[1][1] = 4;
ghhpindexdict[1][2] = 5;
ghhpindexdict[2][0] = 3;
ghhpindexdict[2][1] = 5;
ghhpindexdict[2][2] = 6;
ghhpindexdict[0][3] = 7;
ghhpindexdict[0][4] = 8;
ghhpindexdict[1][3] = 9;
ghhpindexdict[1][4] = 10;
ghhpindexdict[2][3] = 11;
ghhpindexdict[2][4] = 12;
ghhpindexdict[3][0] = 7;
ghhpindexdict[3][1] = 9;
ghhpindexdict[3][2] = 11;
ghhpindexdict[3][3] = 13;
ghhpindexdict[3][4] = 14;
ghhpindexdict[4][0] = 8;
ghhpindexdict[4][1] = 10;
ghhpindexdict[4][2] = 12;
ghhpindexdict[4][3] = 14;
ghhpindexdict[4][4] = 15;
reNj1j2hdict[0][0][0] = pycuda::real(pycuda::complex<double>(1./(2.*sqrt(2.*pi)),0.));
reNj1j2hdict[0][1][0] = pycuda::real(pycuda::complex<double>(-sqrt(3.)/(2.*sqrt(2.*pi)),0.));
reNj1j2hdict[0][2][0] = pycuda::real(pycuda::complex<double>(sqrt(5.)/(4.*sqrt(2.*pi)),0.));
reNj1j2hdict[1][0][0] = pycuda::real(pycuda::complex<double>(sqrt(3.)/(2.*sqrt(2.*pi)),0.));
reNj1j2hdict[1][1][0] = pycuda::real(pycuda::complex<double>(-3./(2.*sqrt(2.*pi)),0.));
reNj1j2hdict[1][1][1] = pycuda::real(pycuda::complex<double>(-3./(4.*sqrt(pi)),0.));
reNj1j2hdict[1][1][2] = pycuda::real(pycuda::complex<double>(0.,-3./(4.*sqrt(pi))));
reNj1j2hdict[1][2][0] = pycuda::real(pycuda::complex<double>(sqrt(15.)/(4.*sqrt(2.*pi)),0.));
reNj1j2hdict[1][2][1] = pycuda::real(pycuda::complex<double>(3.*sqrt(5.)/(4.*sqrt(pi)),0.));
reNj1j2hdict[1][2][2] = pycuda::real(pycuda::complex<double>(0.,3.*sqrt(5.)/(4.*sqrt(pi))));
reNj1j2hdict[2][0][0] = pycuda::real(pycuda::complex<double>(sqrt(5.)/(4.*sqrt(2.*pi)),0.));
reNj1j2hdict[2][1][0] = pycuda::real(pycuda::complex<double>(-sqrt(15.)/(4.*sqrt(2.*pi)),0.));
reNj1j2hdict[2][1][1] = pycuda::real(pycuda::complex<double>(-3.*sqrt(5.)/(4.*sqrt(pi)),0.));
reNj1j2hdict[2][1][2] = pycuda::real(pycuda::complex<double>(0.,-3.*sqrt(5.)/(4.*sqrt(pi))));
reNj1j2hdict[2][2][0] = pycuda::real(pycuda::complex<double>(5./(8.*sqrt(2.*pi)),0.));
reNj1j2hdict[2][2][1] = pycuda::real(pycuda::complex<double>(15./(4.*sqrt(pi)),0.));
reNj1j2hdict[2][2][2] = pycuda::real(pycuda::complex<double>(0.,15./(4.*sqrt(pi))));
reNj1j2hdict[2][2][3] = pycuda::real(pycuda::complex<double>(15./(16.*sqrt(pi)),0.));
reNj1j2hdict[2][2][4] = pycuda::real(pycuda::complex<double>(0.,15./(16.*sqrt(pi))));
imNj1j2hdict[0][0][0] = pycuda::imag(pycuda::complex<double>(1./(2.*sqrt(2.*pi)),0.));
imNj1j2hdict[0][1][0] = pycuda::imag(pycuda::complex<double>(-sqrt(3.)/(2.*sqrt(2.*pi)),0.));
imNj1j2hdict[0][2][0] = pycuda::imag(pycuda::complex<double>(sqrt(5.)/(4.*sqrt(2.*pi)),0.));
imNj1j2hdict[1][0][0] = pycuda::imag(pycuda::complex<double>(sqrt(3.)/(2.*sqrt(2.*pi)),0.));
imNj1j2hdict[1][1][0] = pycuda::imag(pycuda::complex<double>(-3./(2.*sqrt(2.*pi)),0.));
imNj1j2hdict[1][1][1] = pycuda::imag(pycuda::complex<double>(-3./(4.*sqrt(pi)),0.));
imNj1j2hdict[1][1][2] = pycuda::imag(pycuda::complex<double>(0.,-3./(4.*sqrt(pi))));
imNj1j2hdict[1][2][0] = pycuda::imag(pycuda::complex<double>(sqrt(15.)/(4.*sqrt(2.*pi)),0.));
imNj1j2hdict[1][2][1] = pycuda::imag(pycuda::complex<double>(3.*sqrt(5.)/(4.*sqrt(pi)),0.));
imNj1j2hdict[1][2][2] = pycuda::imag(pycuda::complex<double>(0.,3.*sqrt(5.)/(4.*sqrt(pi))));
imNj1j2hdict[2][0][0] = pycuda::imag(pycuda::complex<double>(sqrt(5.)/(4.*sqrt(2.*pi)),0.));
imNj1j2hdict[2][1][0] = pycuda::imag(pycuda::complex<double>(-sqrt(15.)/(4.*sqrt(2.*pi)),0.));
imNj1j2hdict[2][1][1] = pycuda::imag(pycuda::complex<double>(-3.*sqrt(5.)/(4.*sqrt(pi)),0.));
imNj1j2hdict[2][1][2] = pycuda::imag(pycuda::complex<double>(0.,-3.*sqrt(5.)/(4.*sqrt(pi))));
imNj1j2hdict[2][2][0] = pycuda::imag(pycuda::complex<double>(5./(8.*sqrt(2.*pi)),0.));
imNj1j2hdict[2][2][1] = pycuda::imag(pycuda::complex<double>(15./(4.*sqrt(pi)),0.));
imNj1j2hdict[2][2][2] = pycuda::imag(pycuda::complex<double>(0.,15./(4.*sqrt(pi))));
imNj1j2hdict[2][2][3] = pycuda::imag(pycuda::complex<double>(15./(16.*sqrt(pi)),0.));
imNj1j2hdict[2][2][4] = pycuda::imag(pycuda::complex<double>(0.,15./(16.*sqrt(pi))));
}
// ##########################################
// Normalisation weights
__device__ int indexdictcpp[4050] = {0,1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,2,
3,-1,-1,-1,-1,-1,-1,-1,-1,4,5,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,6,7,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,8,9,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,10,
11,-1,-1,-1,-1,-1,-1,-1,-1,12,13,-1,-1,-1,-1,-1,-1,-1,-1,14,15,-1,-1,-1,-1,-1,-1,-1,-1,16,17,-1,-1,-1,-1,-1,-1,-1,-1,18,19,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,20,21,-1,-1,-1,-1,-1,-1,-1,-1,22,23,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,24,
25,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,26,27,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,28,
29,-1,-1,-1,-1,-1,-1,-1,-1,30,31,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,32,33,-1,-1,-1,-1,-1,-1,-1,-1,34,35,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,36,37,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,38,39,-1,-1,-1,-1,-1,-1,-1,-1,40,
41,-1,-1,-1,-1,-1,-1,-1,-1,42,43,-1,-1,-1,-1,-1,-1,-1,-1,44,45,-1,-1,-1,-1,-1,-1,-1,-1,46,47,48,49,-1,-1,-1,-1,-1,-1,50,51,-1,-1,-1,-1,-1,-1,-1,-1,52,
53,-1,-1,-1,-1,-1,-1,-1,-1,54,55,-1,-1,-1,-1,-1,-1,-1,-1,56,57,-1,-1,-1,-1,-1,-1,-1,-1,58,59,-1,-1,-1,-1,-1,-1,-1,-1,60,61,-1,-1,-1,-1,-1,-1,-1,-1,62,
63,-1,-1,-1,-1,-1,-1,-1,-1,64,65,-1,-1,-1,-1,-1,-1,-1,-1,66,67,68,69,70,71,-1,-1,-1,-1,72,73,74,75,-1,-1,-1,-1,-1,-1,76,77,-1,-1,-1,-1,-1,-1,-1,-1,78,
79,80,81,-1,-1,-1,-1,-1,-1,82,83,84,85,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,86,
87,-1,-1,-1,-1,-1,-1,-1,-1,88,89,-1,-1,-1,-1,-1,-1,-1,-1,90,91,-1,-1,-1,-1,-1,-1,-1,-1,92,93,-1,-1,-1,-1,-1,-1,-1,-1,94,95,-1,-1,-1,-1,-1,-1,-1,-1,96,
97,-1,-1,-1,-1,-1,-1,-1,-1,98,99,-1,-1,-1,-1,-1,-1,-1,-1,100,101,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,102,103,-1,-1,-1,-1,-1,-1,-1,-1,104,
105,-1,-1,-1,-1,-1,-1,-1,-1,106,107,-1,-1,-1,-1,-1,-1,-1,-1,108,109,-1,-1,-1,-1,-1,-1,-1,-1,110,111,112,113,-1,-1,-1,-1,-1,-1,114,115,116,117,-1,-1,-1,-1,-1,-1,118,
119,-1,-1,-1,-1,-1,-1,-1,-1,120,121,122,123,-1,-1,-1,-1,-1,-1,124,125,-1,-1,-1,-1,-1,-1,-1,-1,126,127,-1,-1,-1,-1,-1,-1,-1,-1,128,129,-1,-1,-1,-1,-1,-1,-1,-1,130,
131,-1,-1,-1,-1,-1,-1,-1,-1,132,133,-1,-1,-1,-1,-1,-1,-1,-1,134,135,136,137,138,139,-1,-1,-1,-1,140,141,142,143,144,145,-1,-1,-1,-1,146,147,-1,-1,-1,-1,-1,-1,-1,-1,148,
149,150,151,152,153,-1,-1,-1,-1,154,155,156,157,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,158,
159,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,160,161,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,162,163,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,164,
165,-1,-1,-1,-1,-1,-1,-1,-1,166,167,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,168,169,-1,-1,-1,-1,-1,-1,-1,-1,170,171,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,172,173,-1,-1,-1,-1,-1,-1,-1,-1,174,175,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,176,177,-1,-1,-1,-1,-1,-1,-1,-1,178,
179,-1,-1,-1,-1,-1,-1,-1,-1,180,181,-1,-1,-1,-1,-1,-1,-1,-1,182,183,-1,-1,-1,-1,-1,-1,-1,-1,184,185,186,187,-1,-1,-1,-1,-1,-1,188,189,-1,-1,-1,-1,-1,-1,-1,-1,190,
191,-1,-1,-1,-1,-1,-1,-1,-1,192,193,194,195,-1,-1,-1,-1,-1,-1,196,197,-1,-1,-1,-1,-1,-1,-1,-1,198,199,-1,-1,-1,-1,-1,-1,-1,-1,200,201,-1,-1,-1,-1,-1,-1,-1,-1,202,
203,-1,-1,-1,-1,-1,-1,-1,-1,204,205,-1,-1,-1,-1,-1,-1,-1,-1,206,207,208,209,210,211,-1,-1,-1,-1,212,213,214,215,-1,-1,-1,-1,-1,-1,216,217,-1,-1,-1,-1,-1,-1,-1,-1,218,
219,220,221,222,223,-1,-1,-1,-1,224,225,226,227,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,
-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,-1,228,
229,-1,-1,-1,-1,-1,-1,-1,-1,230,231,-1,-1,-1,-1,-1,-1,-1,-1,232,233,-1,-1,-1,-1,-1,-1,-1,-1,234,235,-1,-1,-1,-1,-1,-1,-1,-1,236,237,-1,-1,-1,-1,-1,-1,-1,-1,238,
239,-1,-1,-1,-1,-1,-1,-1,-1,240,241,-1,-1,-1,-1,-1,-1,-1,-1,242,243,-1,-1,-1,-1,-1,-1,-1,-1,244,245,-1,-1,-1,-1,-1,-1,-1,-1,246,247,-1,-1,-1,-1,-1,-1,-1,-1,248,
249,-1,-1,-1,-1,-1,-1,-1,-1,250,251,-1,-1,-1,-1,-1,-1,-1,-1,252,253,-1,-1,-1,-1,-1,-1,-1,-1,254,255,256,257,-1,-1,-1,-1,-1,-1,258,259,260,261,-1,-1,-1,-1,-1,-1,262,
263,-1,-1,-1,-1,-1,-1,-1,-1,264,265,266,267,-1,-1,-1,-1,-1,-1,268,269,270,271,-1,-1,-1,-1,-1,-1,272,273,-1,-1,-1,-1,-1,-1,-1,-1,274,275,-1,-1,-1,-1,-1,-1,-1,-1,276,
277,-1,-1,-1,-1,-1,-1,-1,-1,278,279,-1,-1,-1,-1,-1,-1,-1,-1,280,281,282,283,284,285,-1,-1,-1,-1,286,287,288,289,290,291,-1,-1,-1,-1,292,293,-1,-1,-1,-1,-1,-1,-1,-1,294,
295,296,297,298,299,-1,-1,-1,-1,300,301,302,303,304,305,-1,-1,-1,-1,306,307,-1,-1,-1,-1,-1,-1,-1,-1,308,309,-1,-1,-1,-1,-1,-1,-1,-1,310,311,-1,-1,-1,-1,-1,-1,-1,-1,312,
313,-1,-1,-1,-1,-1,-1,-1,-1,314,315,316,317,318,319,-1,-1,-1,-1,320,321,322,323,324,325,-1,-1,-1,-1,326,327,-1,-1,-1,-1,-1,-1,-1,-1,328,329,330,331,332,333,-1,-1,-1,-1,334,
335,336,337,338,339,340,341,-1,-1,342,343,-1,-1,-1,-1,-1,-1,-1,-1,344,345,-1,-1,-1,-1,-1,-1,-1,-1,346,347,-1,-1,-1,-1,-1,-1,-1,-1,348,349,-1,-1,-1,-1,-1,-1,-1,-1,350,
351,352,353,354,355,-1,-1,-1,-1,356,357,358,359,360,361,-1,-1,-1,-1,362,363,-1,-1,-1,-1,-1,-1,-1,-1,364,365,366,367,368,369,-1,-1,-1,-1,370,371,372,373,374,375,376,377,378,379};
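// Flattens the six indices (j1,j2,h,j1p,j2p,hp) plus the two-valued 'part' flag into a single
// offset (mixed radix 3*3*5*3*3*5*2 = 4050 entries). Unused combinations hold the sentinel -1;
// the used ones are compressed into the 380 slots of nw_comp_matrix.
// Example: redindexcpp(0,0,0,0,0,0,0) = indexdictcpp[0] = 0, redindexcpp(0,0,0,0,0,0,1) = 1.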
__device__ int redindexcpp(int j1, int j2, int h, int j1p, int j2p, int hp, int part) {
return indexdictcpp[part+2*hp+10*j2p+30*j1p+90*h+450*j2+1350*j1];
}
__device__ double nw_comp_matrix[2][2][380];
__device__ double nw_comp(int year_opt,int trig_opt,int j1,int j2,int h,int j1p,int j2p,int hp,int part) {
return nw_comp_matrix[year_opt][trig_opt][redindexcpp(j1,j2,h,j1p,j2p,hp,part)];
}
// ##########################################
// Splines
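// Decay-time acceptance parametrised as a cubic spline: six knots and, for each of the five
// bins, four polynomial coefficients (a0..a3), tabulated separately per year (2011/2012),
// L0 trigger category (TIS/noTIS) and mass window (wide/narrow).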
__device__ double knots_wide[6] = {0.0,0.9,1.4,2.1,3.1,12.0};
__device__ double knots_narrow[6] = {0.0,0.9,1.4,2.0,3.1,12.0};
__device__ double a_2011_L0TIS_wide[5][4] = {{0.0,-0.00112621527248,1.58569721831,-0.819679115309},
{-0.695240812888,2.31634316102,-0.989268755348,0.134011986046},
{-0.968319206849,2.90151114808,-1.40724588896,0.233530351192},
{1.49093519128,-0.611709420676,0.265716286636,-0.0320192004899},
{0.480295052314,0.366329423485,-0.0497801147062,0.00190514374049}};
__device__ double a_2012_L0TIS_wide[5][4] = {{0.0,-0.00108246180814,2.40916212056,-1.38444396395},
{-1.71192960187,5.70534954441,-3.93131788635,0.963881964535},
{1.5097616167,-1.19827449537,0.999842142073,-0.210203756517},
{-1.60039623414,3.24480814869,-1.11591149796,0.125630154599},
{2.3184526579,-0.547626262968,0.107454441287,-0.00591457005072}};
__device__ double a_2011_L0noTIS_wide[5][4] = {{0.0,-0.00098639638413,2.21305126523,-1.24975680956},
{-1.38905016398,4.62918081687,-2.93157897172,0.655661796718},
{0.301283465128,1.00703732593,-0.344333621046,0.0396509989394},
{0.280998193193,1.03601628584,-0.358133125764,0.0418413965137},
{1.65203424402,-0.290792795608,0.0698698037346,-0.00418042386257}};
__device__ double a_2012_L0noTIS_wide[5][4] = {{0.0,-0.00143959572436,4.23974268964,-2.51791074478},
{-3.18832348534,10.6263053554,-7.56886281162,1.85564684828},
{2.64645621312,-1.87679399844,1.36192244113,-0.270730592851},
{-1.04557535896,3.39753681882,-1.14966366233,0.127933868015},
{2.9370024549,-0.456570742976,0.0935968414771,-0.00575005712493}};
__device__ double a_2011_L0TIS_narrow[5][4] = {{0.0,-0.00105007049752,2.10287696037,-1.21206867385},
{-1.42394206761,4.74542348821,-3.17098254931,0.741212626035},
{0.646630522127,0.308482224484,-0.00173878950165,-0.0133692215384},
{0.350645013584,0.752460487299,-0.223727920909,0.0236289670295},
{1.06879626479,0.0574754054844,0.000460815159922,-0.000477348676871}};
__device__ double a_2012_L0TIS_narrow[5][4] = {{0.0,-0.0217677276861,1.92932115124,-1.02522029051},
{-0.924217061529,3.05895581074,-1.49370500257,0.242567173866},
{-1.04708252048,3.32223893706,-1.68176437851,0.287343215756},
{1.38760726219,-0.329795736933,0.144252958489,-0.0169930070766},
{0.873682232338,0.167551066145,-0.0161814941165,0.000258009332581}};
__device__ double a_2011_L0noTIS_narrow[5][4] = {{0.0,-0.00141362744144,7.02846926232,-4.03198237274},
{-4.36857014682,14.5604868619,-9.15142017033,1.96056926898},
{-0.84323191785,7.00619065702,-3.75549430966,0.67582501644},
{5.39200898979,-2.34667070444,0.920936371063,-0.103580097015},
{2.42800757735,0.521717759207,-0.00435023011253,-0.00408691409238}};
__device__ double a_2012_L0noTIS_narrow[5][4] = {{0.0,-0.0135116503737,3.32589922088,-2.04848218086},
{-2.87136660569,9.55771036858,-7.3087919113,1.89029231253},
{3.9544829436,-5.06911009418,3.13893699067,-0.597262187936},
{-1.99801374513,3.85963493892,-1.32543552587,0.146799898155},
{2.48668454014,-0.480395659729,0.0745743446588,-0.00373879760085}};
__device__ double spline_knot(int wide_window,int i) {
if (wide_window == 0) {return knots_narrow[i];}
else {return knots_wide[i];}
}
__device__ double spline_coef(int year_opt,int trig_opt,int wide_window,int ibin,int deg) {
if (year_opt == 0) {
if (trig_opt == 0) {
if (wide_window == 0) {return a_2011_L0TIS_narrow[ibin][deg];}
else {return a_2011_L0TIS_wide[ibin][deg];}
}
else {
if (wide_window == 0) {return a_2011_L0noTIS_narrow[ibin][deg];}
else {return a_2011_L0noTIS_wide[ibin][deg];}
}
}
else {
if (trig_opt == 0) {
if (wide_window == 0) {return a_2012_L0TIS_narrow[ibin][deg];}
else {return a_2012_L0TIS_wide[ibin][deg];}
}
else {
if (wide_window == 0) {return a_2012_L0noTIS_narrow[ibin][deg];}
else {return a_2012_L0noTIS_wide[ibin][deg];}
}
}
}
// ##########################################
// Buffer variables
__device__ const int max_N_events = 100000;
__device__ int wide_window;
__device__ int year_opt[4];
__device__ int trig_opt[4];
__device__ double alt_fit;
__device__ double option;
__device__ double inftres;
__device__ double acctype;
__device__ double A_j1;
__device__ double A_j2;
__device__ double A_h;
__device__ double A_j1p;
__device__ double A_j2p;
__device__ double A_hp;
__device__ double qcomp;
__device__ int decision_SSK[4][max_N_events];
__device__ int decision_OS[4][max_N_events];
__device__ double etamistag_SSK[4][max_N_events];
__device__ double etamistag_OS[4][max_N_events];
__device__ double m1[4][max_N_events];
__device__ double m2[4][max_N_events];
__device__ double cos1[4][max_N_events];
__device__ double cos2[4][max_N_events];
__device__ double phi[4][max_N_events];
__device__ double t[4][max_N_events];
__device__ double t_err[4][max_N_events];
__device__ double max_fun[max_N_events];
__device__ double fun_ran[max_N_events];
__device__ double dec_accepted[max_N_events];
__device__ double m1_MCrew[max_N_events];
__device__ double m2_MCrew[max_N_events];
__device__ double cos1_MCrew[max_N_events];
__device__ double cos2_MCrew[max_N_events];
__device__ double phi_MCrew[max_N_events];
__device__ double IT_cosh_MCrew[max_N_events];
__device__ double IT_sinh_MCrew[max_N_events];
__device__ double IT_cos_MCrew[max_N_events];
__device__ double IT_sin_MCrew[max_N_events];
__device__ double fi_cos1_MCrew[18][max_N_events];
__device__ double fi_cos2_MCrew[18][max_N_events];
__device__ double gi_MCrew[15][max_N_events];
__device__ double reMj1j2_MCrew[3][3][max_N_events];
__device__ double imMj1j2_MCrew[3][3][max_N_events];
__device__ pycuda::complex<double> Mj1j2_MCrew(int j1, int j2, int iev) {
return pycuda::complex<double>(reMj1j2_MCrew[j1][j2][iev],imMj1j2_MCrew[j1][j2][iev]);
}
__device__ double phasespace_MCrew[max_N_events];
__device__ double reA00;
__device__ double reA01;
__device__ double reA10;
__device__ double reA02;
__device__ double reA20;
__device__ double reA110;
__device__ double reA11par;
__device__ double reA11perp;
__device__ double reA120;
__device__ double reA12par;
__device__ double reA12perp;
__device__ double reA210;
__device__ double reA21par;
__device__ double reA21perp;
__device__ double reA220;
__device__ double reA22par;
__device__ double reA22perp;
__device__ double reA22par2;
__device__ double reA22perp2;
__device__ double DCP_SS;
__device__ double DCP_SV;
__device__ double DCP_VS;
__device__ double DCP_ST;
__device__ double DCP_TS;
__device__ double DCP;
__device__ double DCP_VT;
__device__ double DCP_TV;
__device__ double DCP_TT;
__device__ double imA00;
__device__ double imA01;
__device__ double imA10;
__device__ double imA02;
__device__ double imA20;
__device__ double imA110;
__device__ double imA11par;
__device__ double imA11perp;
__device__ double imA120;
__device__ double imA12par;
__device__ double imA12perp;
__device__ double imA210;
__device__ double imA21par;
__device__ double imA21perp;
__device__ double imA220;
__device__ double imA22par;
__device__ double imA22perp;
__device__ double imA22par2;
__device__ double imA22perp2;
__device__ double phis;
__device__ double dphi_SS;
__device__ double dphi_SV;
__device__ double dphi_VS;
__device__ double dphi_ST;
__device__ double dphi_TS;
__device__ double dphi_VT;
__device__ double dphi_TV;
__device__ double dphi_TT;
__device__ double delta_m_freq;
__device__ double gamma_Bs_freq;
__device__ double delta_gamma_freq;
__device__ double p0metac_tag_SSK;
__device__ double p0metac_tag_OS;
__device__ double Dp0half_tag_SSK;
__device__ double Dp0half_tag_OS;
__device__ double p1_tag_SSK;
__device__ double p1_tag_OS;
__device__ double Dp1half_tag_SSK;
__device__ double Dp1half_tag_OS;
__device__ double etac_tag_SSK;
__device__ double etac_tag_OS;
__device__ double deltatmean_tres_11;
__device__ double p0_tres_11;
__device__ double p1_tres_11;
__device__ double deltatmean_tres_12;
__device__ double p0_tres_12;
__device__ double p1_tres_12;
__device__ double mv;
__device__ double ms;
__device__ double mt;
__device__ double gv;
__device__ double gs;
__device__ double gt;
__device__ double c1_mass_swave;
__device__ double c2_mass_swave;
__device__ double c3_mass_swave;
__device__ double c4_mass_swave;
__device__ double c5_mass_swave;
__device__ double c6_mass_swave;
__device__ double c7_mass_swave;
__device__ double c8_mass_swave;
__device__ double c9_mass_swave;
__device__ double res_mass;
__device__ double tag_eff_SSK;
__device__ double mu1_SSK;
__device__ double sigma1_SSK;
__device__ double c_SSK;
__device__ double mu2_SSK;
__device__ double sigma2_SSK;
__device__ double tag_eff_OS;
__device__ double mu1_OS;
__device__ double sigma1_OS;
__device__ double c_OS;
__device__ double mu2_OS;
__device__ double sigma2_OS;
__device__ double gamma1_dt;
__device__ double beta1_dt;
__device__ double c_dt;
__device__ double gamma2_dt;
__device__ double beta2_dt;
__device__ double pw_mass_altmodel;
__device__ double f_1410_rel2_892;
__device__ double delta_1410_rel2_892;
__device__ double f_1680_rel2_892;
__device__ double delta_1680_rel2_892;
__device__ double Im00;
__device__ double Im01;
__device__ double Im10;
__device__ double Im02;
__device__ double Im20;
__device__ double Im11;
__device__ double Im12;
__device__ double Im21;
__device__ double Im22;
__device__ double Ih1Re;
__device__ double Ih2Re;
__device__ double Ih3Re;
__device__ double Ih4Re;
__device__ double Ih5Re;
__device__ double Ih6Re;
__device__ double Ih7Re;
__device__ double Ih8Re;
__device__ double Ih9Re;
__device__ double Ih10Re;
__device__ double Ih11Re;
__device__ double Ih12Re;
__device__ double Ih13Re;
__device__ double Ih14Re;
__device__ double Ih15Re;
__device__ double Ih16Re;
__device__ double Ih17Re;
__device__ double Ih18Re;
__device__ double Ih19Re;
__device__ double Ih20Re;
__device__ double Ih21Re;
__device__ double Ih22Re;
__device__ double Ih23Re;
__device__ double Ih24Re;
__device__ double Ih25Re;
__device__ double Ih26Re;
__device__ double Ih27Re;
__device__ double Ih28Re;
__device__ double Ih29Re;
__device__ double Ih30Re;
__device__ double Ih1Im;
__device__ double Ih2Im;
__device__ double Ih3Im;
__device__ double Ih4Im;
__device__ double Ih5Im;
__device__ double Ih6Im;
__device__ double Ih7Im;
__device__ double Ih8Im;
__device__ double Ih9Im;
__device__ double Ih10Im;
__device__ double Ih11Im;
__device__ double Ih12Im;
__device__ double Ih13Im;
__device__ double Ih14Im;
__device__ double Ih15Im;
__device__ double Ih16Im;
__device__ double Ih17Im;
__device__ double Ih18Im;
__device__ double Ih19Im;
__device__ double Ih20Im;
__device__ double Ih21Im;
__device__ double If1;
__device__ double If2;
__device__ double If3;
__device__ double If4;
__device__ double If5;
__device__ double If6;
__device__ double If7;
__device__ double If8;
__device__ double If9;
__device__ double If10;
__device__ double If11;
__device__ double If12;
__device__ double If13;
__device__ double If14;
__device__ double If15;
__device__ double If16;
__device__ double If17;
__device__ double If18;
__device__ double reAj1j2h_temp[3][3][5];
__device__ double imAj1j2h_temp[3][3][5];
__device__ double reAbarj1j2h_temp[3][3][5];
__device__ double imAbarj1j2h_temp[3][3][5];
__device__ pycuda::complex<double> Aj1j2h_temp(int j1, int j2, int h) {
return pycuda::complex<double>(reAj1j2h_temp[j1][j2][h],imAj1j2h_temp[j1][j2][h]);
}
__device__ pycuda::complex<double> Abarj1j2h_temp(int j1, int j2, int h) {
return pycuda::complex<double>(reAbarj1j2h_temp[j1][j2][h],imAbarj1j2h_temp[j1][j2][h]);
}
__device__ double T_cosh_temp[4][max_N_events];
__device__ double T_sinh_temp[4][max_N_events];
__device__ double T_cos_temp[4][max_N_events];
__device__ double T_sin_temp[4][max_N_events];
__device__ double IT_cosh_temp_deltat[4][max_N_events];
__device__ double IT_sinh_temp_deltat[4][max_N_events];
__device__ double IT_cos_temp_deltat[4][max_N_events];
__device__ double IT_sin_temp_deltat[4][max_N_events];
__device__ double zeta_temp[4][max_N_events];
__device__ double DCP_tzero_temp[4][max_N_events];
__device__ double fi_cos1_temp[18][4][max_N_events];
__device__ double fi_cos2_temp[18][4][max_N_events];
__device__ double gi_temp[15][4][max_N_events];
__device__ double reMj1j2_temp[3][3][4][max_N_events];
__device__ double imMj1j2_temp[3][3][4][max_N_events];
__device__ double phasespace_temp[4][max_N_events];
__device__ double reIhj1j2j1pj2pdict[3][3][3][3];
__device__ double imIhj1j2j1pj2pdict[3][3][3][3];
__device__ pycuda::complex<double> Mj1j2_temp(int j1, int j2, int icat, int iev) {
return pycuda::complex<double>(reMj1j2_temp[j1][j2][icat][iev],imMj1j2_temp[j1][j2][icat][iev]);
}
__device__ pycuda::complex<double> Ihj1j2j1pj2p(int j1, int j2, int j1p, int j2p) {
return pycuda::complex<double>(reIhj1j2j1pj2pdict[j1][j2][j1p][j2p],imIhj1j2j1pj2pdict[j1][j2][j1p][j2p]);
}
// ##########################################
// Toy MC generation variables
__device__ double knots_gen_wide[6] = {0.0,0.9,1.3,1.9,3.0,12.0};
__device__ double knots_gen_narrow[6] = {0.0,0.9,1.3,1.9,3.0,12.0};
__device__ double a_gen_wide[5][4] = {{0.0,-0.00138436998913,2.5481847953,-1.45909728079},
{-1.6653800648,5.54988251268,-3.61988951878,0.82537468739},
{-0.289336418837,2.37439717584,-1.17720849044,0.199046218586},
{0.993185871959,0.349361979846,-0.111400492548,0.0120623593064},
{1.32606052325,0.0164873285591,-0.000442275452223,-0.000266331481965}};
__device__ double a_gen_narrow[5][4] = {{0.0,0.00101382530285,4.89487359849,-2.83048035352},
{-3.54249846114,11.8093420291,-8.22549107238,2.02891396902},
{1.06333885612,1.18048668157,-0.0494484973637,-0.0675072040589},
{-0.421082535913,3.52430993215,-1.28303968188,0.148912301997},
{3.78015377185,-0.67692637561,0.117372420705,-0.006689042735}};
__device__ double k1_gen(int wide_window) {
if (wide_window) {return -0.40631262195;}
else {return -0.505556252411;}
}
__device__ double k2_gen(int wide_window) {
if (wide_window) {return -0.39861379722;}
else {return -0.404368705592;}
}
__device__ double k3_gen(int wide_window) {
if (wide_window) {return -0.0363987194893;}
else {return -0.0483750503137;}
}
__device__ double k4_gen(int wide_window) {
if (wide_window) {return -0.0644151228873;}
else {return -0.0175772310185;}
}
__device__ double k5_gen(int wide_window) {
if (wide_window) {return 0.0270906873059;}
else {return 0.0389936024545;}
}
__device__ double p1_gen(int wide_window) {
if (wide_window) {return -0.000100573256821;}
else {return 4.35273527839e-05;}
}
__device__ double knot_gen(int wide_window,int i) {
if (wide_window == 0) {return knots_gen_narrow[i];}
else {return knots_gen_wide[i];}
}
__device__ double coef_gen(int wide_window,int ibin,int deg) {
if (wide_window == 0) {return a_gen_narrow[ibin][deg];}
else {return a_gen_wide[ibin][deg];}
}
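// Toy-generation decay-time acceptance: find the spline bin containing tau and evaluate the
// cubic polynomial a0 + a1*tau + a2*tau^2 + a3*tau^3 with the generation-level coefficients.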
__device__ double accGenTime(double tau) {
int tau_bin;
if (tau < knot_gen(wide_window,1)) {tau_bin = 0;}
else if ((tau >= knot_gen(wide_window,1)) and (tau < knot_gen(wide_window,2))) {tau_bin = 1;}
else if ((tau >= knot_gen(wide_window,2)) and (tau < knot_gen(wide_window,3))) {tau_bin = 2;}
else if ((tau >= knot_gen(wide_window,3)) and (tau < knot_gen(wide_window,4))) {tau_bin = 3;}
else {tau_bin = 4;}
return coef_gen(wide_window,tau_bin,0)+tau*coef_gen(wide_window,tau_bin,1)+tau*tau*coef_gen(wide_window,tau_bin,2)+tau*tau*tau*coef_gen(wide_window,tau_bin,3);
}
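// Angular acceptance as a Chebyshev series in x = cos(theta): the bracketed polynomials are
// T2(x)..T5(x), so acc(x) = 1 + k1*T1(x) + k2*T2(x) + k3*T3(x) + k4*T4(x) + k5*T5(x).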
__device__ double accGenAng(double x) {
return 1.+k1_gen(wide_window)*x+k2_gen(wide_window)*(2.*x*x-1.)+k3_gen(wide_window)*(4.*x*x*x-3.*x)+k4_gen(wide_window)*(8.*x*x*x*x-8.*x*x+1.)+k5_gen(wide_window)*(16.*x*x*x*x*x-20.*x*x*x+5.*x);
}
__device__ double accGenMass(double m) {
return 1. + p1_gen(wide_window)*m;
}
__device__ double accGen(double tau, double ma, double mb, double cos1var, double cos2var, double phivar) {
return accGenTime(tau)*accGenMass(ma)*accGenMass(mb)*accGenAng(cos1var)*accGenAng(cos2var);
}
// ##########################################
// Physical terms
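// Amplitude components A_{j1 j2}^{h}: judging from the variable names, j1 and j2 label the
// partial wave of each Kpi system (0 = S, 1 = P, 2 = D) and h the transversity component
// (0, parallel, perpendicular; the D-D combination carries two extra components).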
__device__ double reAj1j2h(int j1, int j2, int h) {
switch(j1) {
case 0 :
switch(j2) {
case 0 : return reA00;
case 1 : return reA01;
case 2 : return reA02;
}
case 1 :
switch(j2) {
case 0 : return reA10;
case 1 :
switch(h) {
case 0 : return reA110;
case 1 : return reA11par;
case 2 : return reA11perp;
}
case 2 :
switch(h) {
case 0 : return reA120;
case 1 : return reA12par;
case 2 : return reA12perp;
}
}
case 2 :
switch(j2) {
case 0 : return reA20;
case 1 :
switch(h) {
case 0 : return reA210;
case 1 : return reA21par;
case 2 : return reA21perp;
}
case 2 :
switch(h) {
case 0 : return reA220;
case 1 : return reA22par;
case 2 : return reA22perp;
case 3 : return reA22par2;
case 4 : return reA22perp2;
}
}
}
return 0.;
}
__device__ double imAj1j2h(int j1, int j2, int h) {
switch(j1) {
case 0 :
switch(j2) {
case 0 : return imA00;
case 1 : return imA01;
case 2 : return imA02;
}
case 1 :
switch(j2) {
case 0 : return imA10;
case 1 :
switch(h) {
case 0 : return imA110;
case 1 : return imA11par;
case 2 : return imA11perp;
}
case 2 :
switch(h) {
case 0 : return imA120;
case 1 : return imA12par;
case 2 : return imA12perp;
}
}
case 2 :
switch(j2) {
case 0 : return imA20;
case 1 :
switch(h) {
case 0 : return imA210;
case 1 : return imA21par;
case 2 : return imA21perp;
}
case 2 :
switch(h) {
case 0 : return imA220;
case 1 : return imA22par;
case 2 : return imA22perp;
case 3 : return imA22par2;
case 4 : return imA22perp2;
}
}
}
return 0.;
}
__device__ double DCPj1j2(int j1, int j2) {
switch(j1) {
case 0 :
switch(j2) {
case 0 : return DCP;//+DCP_SS;
case 1 : return DCP;//+DCP_SV;
case 2 : return DCP;//+DCP_TT;//DCP_ST;
}
case 1 :
switch(j2) {
case 0 : return DCP;//+DCP_VS;
case 1 : return DCP;
case 2 : return DCP;//+DCP_TT;//DCP_VT;
}
case 2 :
switch(j2) {
case 0 : return DCP;//+DCP_TT;//DCP_TS;
case 1 : return DCP;//+DCP_TT;//DCP_TV;
case 2 : return DCP;//+DCP_TT;
}
}
return 0;
}
__device__ double dphij1j2(int j1, int j2) {
switch(j1) {
case 0 :
switch(j2) {
case 0 : return dphi_SS;
case 1 : return dphi_SV;
case 2 : return dphi_ST;
}
case 1 :
switch(j2) {
case 0 : return dphi_VS;
case 1 : return 0.;
case 2 : return dphi_VT;
}
case 2 :
switch(j2) {
case 0 : return dphi_TS;
case 1 : return dphi_TV;
case 2 : return dphi_TT;
}
}
return 0;
}
__device__ double etah(int h) {
if ((h == 2) or (h == 4)) {return -1.;}
else {return 1.;}
}
__device__ double etaj1j2h(int j1, int j2, int h) {
return pow(-1.,j1+j2)*etah(h);
}
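// B_s and anti-B_s decay amplitudes built from the same moduli/phases: scaled by
// sqrt(1 +/- DCP_{j1j2}) and rotated by +/- (phis + dphi_{j1j2})/2; Abar additionally swaps
// j1 <-> j2 and carries the CP factor eta_{j1j2}^{h} = (-1)^{j1+j2} * eta_h.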
__device__ pycuda::complex<double> Aj1j2h(int j1, int j2, int h) {
pycuda::complex<double> I(0.,1.);
return pycuda::complex<double>(reAj1j2h(j1,j2,h),imAj1j2h(j1,j2,h))*pycuda::complex<double>(sqrt(1.+DCPj1j2(j1,j2)))*exp(I*0.5*(phis+dphij1j2(j1,j2)));
}
__device__ pycuda::complex<double> Abarj1j2h(int j1, int j2, int h) {
pycuda::complex<double> I(0.,1.);
return etaj1j2h(j2,j1,h)*pycuda::complex<double>(reAj1j2h(j2,j1,h),imAj1j2h(j2,j1,h))*pycuda::complex<double>(sqrt(1.-DCPj1j2(j2,j1)))*exp(-I*0.5*(phis+dphij1j2(j2,j1)));
}
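// The four amplitude bilinears entering the time-dependent rate: CP-averaged term (paired
// with cosh), Delta-Gamma term (sinh), direct-CP term (cos) and mixing-induced term (sin).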
__device__ pycuda::complex<double> M_Average(int j1, int j2, int h, int j1p, int j2p, int hp) {
return Aj1j2h_temp(j1,j2,h)*pycuda::conj(Aj1j2h_temp(j1p,j2p,hp))+Abarj1j2h_temp(j1,j2,h)*pycuda::conj(Abarj1j2h_temp(j1p,j2p,hp));
}
__device__ pycuda::complex<double> M_DeltaGamma(int j1, int j2, int h, int j1p, int j2p, int hp) {
return Aj1j2h_temp(j1,j2,h)*pycuda::conj(Abarj1j2h_temp(j1p,j2p,hp))+Abarj1j2h_temp(j1,j2,h)*pycuda::conj(Aj1j2h_temp(j1p,j2p,hp));
}
__device__ pycuda::complex<double> M_DirCP(int j1, int j2, int h, int j1p, int j2p, int hp) {
return Aj1j2h_temp(j1,j2,h)*pycuda::conj(Aj1j2h_temp(j1p,j2p,hp))-Abarj1j2h_temp(j1,j2,h)*pycuda::conj(Abarj1j2h_temp(j1p,j2p,hp));
}
__device__ pycuda::complex<double> M_MixCP(int j1, int j2, int h, int j1p, int j2p, int hp) {
return pycuda::complex<double>(0.,-1.)*(Aj1j2h_temp(j1,j2,h)*pycuda::conj(Abarj1j2h_temp(j1p,j2p,hp))-Abarj1j2h_temp(j1,j2,h)*pycuda::conj(Aj1j2h_temp(j1p,j2p,hp)));
}
// ##########################################
// Flavour tagging terms
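// Linear mistag calibration: omega(eta) = p0 +/- Dp0/2 + (p1 +/- Dp1/2)*(eta - etac), with
// separate SSK and OS parameters and the +/- sign distinguishing B_s from anti-B_s; the
// p0metac parameters appear to store p0 - etac, since etac is added back here.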
__device__ double omega_SSK(double eta) {
return (p0metac_tag_SSK+etac_tag_SSK+Dp0half_tag_SSK)+(p1_tag_SSK+Dp1half_tag_SSK)*(eta-etac_tag_SSK);
}
__device__ double omegabar_SSK(double eta) {
return (p0metac_tag_SSK+etac_tag_SSK-Dp0half_tag_SSK)+(p1_tag_SSK-Dp1half_tag_SSK)*(eta-etac_tag_SSK);
}
__device__ double omega_OS(double eta) {
return (p0metac_tag_OS+etac_tag_OS+Dp0half_tag_OS)+(p1_tag_OS+Dp1half_tag_OS)*(eta-etac_tag_OS);
}
__device__ double omegabar_OS(double eta) {
return (p0metac_tag_OS+etac_tag_OS-Dp0half_tag_OS)+(p1_tag_OS-Dp1half_tag_OS)*(eta-etac_tag_OS);
}
__device__ double P_Bs(int q1, int q2, double eta1, double eta2) {
return (1.+0.5*q1*(1.-q1-2.*omega_SSK(eta1)))*(1.+0.5*q2*(1.-q2-2.*omega_OS(eta2)));
}
__device__ double P_Bsbar(int q1, int q2, double eta1, double eta2) {
return (1.-0.5*q1*(1.+q1-2.*omegabar_SSK(eta1)))*(1.-0.5*q2*(1.+q2-2.*omegabar_OS(eta2)));
}
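// zeta normalises the tagging probabilities (including the production asymmetry DCP_prod),
// and DCP_tzero is the resulting effective CP asymmetry/dilution at t = 0 for a given pair of
// tag decisions and mistag estimates.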
__device__ double zeta(int q1, int q2, double eta1, double eta2) {
return 0.5*((1.+DCP_prod)*P_Bs(q1,q2,eta1,eta2)+(1.-DCP_prod)*P_Bsbar(q1,q2,eta1,eta2));
}
__device__ double DCP_tzero(int q1, int q2, double eta1, double eta2) {
return 0.5/zeta(q1,q2,eta1,eta2)*((1.+DCP_prod)*P_Bs(q1,q2,eta1,eta2)-(1.-DCP_prod)*P_Bsbar(q1,q2,eta1,eta2));
}
// ##########################################
// Time-dependent terms
#define errf_const 1.12837916709551
#define xLim 5.33
#define yLim 4.29
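// faddeeva(z) evaluates the complex error (Faddeeva) function w(z) = exp(-z^2)*erfc(-iz);
// errf_const is 2/sqrt(pi). Inside the |x| < xLim, |y| < yLim window a truncated series
// whose order is tuned by q is used; outside, a short downward recursion. A standard
// reflection formula is applied when the imaginary part of the argument is negative.
// This appears to follow the classic CERNLIB-style scheme for w(z).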
__device__ pycuda::complex<double> faddeeva(pycuda::complex<double> z) {
double in_real = pycuda::real(z);
double in_imag = pycuda::imag(z);
int n, nc, nu;
double h, q, Saux, Sx, Sy, Tn, Tx, Ty, Wx, Wy, xh, xl, x, yh, y;
double Rx [33];
double Ry [33];
x = fabs(in_real);
y = fabs(in_imag);
if (y < yLim && x < xLim) {
q = (1.0 - y / yLim) * sqrt(1.0 - (x / xLim) * (x / xLim));
h = 1.0 / (3.2 * q);
nc = 7 + int(23.0 * q);
xl = pow(h, double(1 - nc));
xh = y + 0.5 / h;
yh = x;
nu = 10 + int(21.0 * q);
Rx[nu] = 0.;
Ry[nu] = 0.;
for (n = nu; n > 0; n--){
Tx = xh + n * Rx[n];
Ty = yh - n * Ry[n];
Tn = Tx*Tx + Ty*Ty;
Rx[n-1] = 0.5 * Tx / Tn;
Ry[n-1] = 0.5 * Ty / Tn;
}
Sx = 0.;
Sy = 0.;
for (n = nc; n>0; n--){
Saux = Sx + xl;
Sx = Rx[n-1] * Saux - Ry[n-1] * Sy;
Sy = Rx[n-1] * Sy + Ry[n-1] * Saux;
xl = h * xl;
};
Wx = errf_const * Sx;
Wy = errf_const * Sy;
}
else {
xh = y;
yh = x;
Rx[0] = 0.;
Ry[0] = 0.;
for (n = 9; n>0; n--){
Tx = xh + n * Rx[0];
Ty = yh - n * Ry[0];
Tn = Tx * Tx + Ty * Ty;
Rx[0] = 0.5 * Tx / Tn;
Ry[0] = 0.5 * Ty / Tn;
};
Wx = errf_const * Rx[0];
Wy = errf_const * Ry[0];
}
if (y == 0.) {
Wx = exp(-x * x);
}
if (in_imag < 0.) {
Wx = 2.0 * exp(y * y - x * x) * cos(2.0 * x * y) - Wx;
Wy = - 2.0 * exp(y * y - x * x) * sin(2.0 * x * y) - Wy;
if (in_real > 0.) {
Wy = -Wy;
}
}
else if (in_real < 0.) {
Wy = -Wy;
}
return pycuda::complex<double>(Wx,Wy);
}
__device__ pycuda::complex<double> conv_exp(double x, pycuda::complex<double> z) {
double re = pycuda::real(z)-x;
if (re>-5.0) {return 0.5*faddeeva(pycuda::complex<double>(-pycuda::real(z),re))*exp(-x*x);}
else {
pycuda::complex<double> mi(0,-1);
pycuda::complex<double> zp = mi*(z-x);
pycuda::complex<double> zsq = zp*zp;
pycuda::complex<double> v = -zsq -x*x;
pycuda::complex<double> iz(pycuda::real(z)+x,pycuda::real(z)-x);
return 0.5*exp(v)*(exp(zsq)/(iz*sqrt(pi)) + 1.)*2.;
}
}
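// conv_exp(x,z) is the basic building block for a one-sided exponential (times an
// oscillation, encoded in the imaginary part of z) convolved with a Gaussian decay-time
// resolution, written through the Faddeeva function; the else branch switches to an
// asymptotic form when real(z)-x is large and negative, where the direct product
// faddeeva(...)*exp(-x*x) would become numerically unstable.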
__device__ pycuda::complex<double> Kn(pycuda::complex<double> z, int n) {
if (n == 0) {return 1./(2.*z);}
else if (n == 1) {return 1./(2.*z*z);}
else if (n == 2) {return 1./z*(1.+1./(z*z));}
else if (n == 3) {return 3./(z*z)*(1.+1./(z*z));}
return pycuda::complex<double>(0.,0.);
}
__device__ pycuda::complex<double> Mn_x(double x, pycuda::complex<double> z, int n) {
if (n == 0) {return pycuda::complex<double>(erf(x),0.)-2.*conv_exp(x,z);}
else if (n == 1) {return 2.*(-pycuda::complex<double>(sqrt(1./pi)*exp(-x*x),0.)-2.*x*conv_exp(x,z));}
else if (n == 2) {return 2.*(-2.*x*exp(-x*x)*pycuda::complex<double>(sqrt(1./pi),0.)-(2.*x*x-1.)*2.*conv_exp(x,z));}
else if (n == 3) {return 4.*(-(2.*x*x-1.)*exp(-x*x)*pycuda::complex<double>(sqrt(1./pi),0.)-x*(2.*x*x-3.)*2.*conv_exp(x,z));}
return pycuda::complex<double>(0.,0.);
}
__device__ pycuda::complex<double> Mn(double x_1, double x_2, pycuda::complex<double> z, int n) {
return Mn_x(x_2,z,n)-Mn_x(x_1,z,n);
}
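// Kn and Mn(_x) are the ingredients of the analytic decay-time integrals used with a
// polynomial (spline) acceptance: Kn(z,n) are the 1/z moments of the convolved
// exponential, and Mn(x1,x2,z,n) the corresponding erf/conv_exp differences between the
// bin edges x1 and x2. They are combined with the binomially expanded spline coefficients
// in set_buffer_integral_terms below.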
__device__ pycuda::complex<double> Tj1j2hj1pj2php(int j1, int j2, int h, int j1p, int j2p, int hp, int icat, int iev) {
return zeta_temp[icat][iev]*((T_cosh_temp[icat][iev]*M_Average(j1,j2,h,j1p,j2p,hp)-T_sinh_temp[icat][iev]*M_DeltaGamma(j1,j2,h,j1p,j2p,hp))+DCP_tzero_temp[icat][iev]*(T_cos_temp[icat][iev]*M_DirCP(j1,j2,h,j1p,j2p,hp)+T_sin_temp[icat][iev]*M_MixCP(j1,j2,h,j1p,j2p,hp)));
}
__device__ pycuda::complex<double> ITj1j2hj1pj2php_deltat(int j1, int j2, int h, int j1p, int j2p, int hp, int icat, int iev) {
return (IT_cosh_temp_deltat[icat][iev]*M_Average(j1,j2,h,j1p,j2p,hp)-IT_sinh_temp_deltat[icat][iev]*M_DeltaGamma(j1,j2,h,j1p,j2p,hp))+DCP_prod*(IT_cos_temp_deltat[icat][iev]*M_DirCP(j1,j2,h,j1p,j2p,hp)+IT_sin_temp_deltat[icat][iev]*M_MixCP(j1,j2,h,j1p,j2p,hp));
}
// ##########################################
// Angular terms
__device__ double fi(double x, int i) {
switch(i) {
case 1 : return 1.;
case 2 : return x;
case 3 : return sqrt(1.-x*x);
case 4 : return 3.*x*x-1.;
case 5 : return x*sqrt(1.-x*x);
case 6 : return x*x;
case 7 : return x*(3.*x*x-1.);
case 8 : return x*x*sqrt(1.-x*x);
case 9 : return 1.-x*x;
case 10 : return (3.*x*x-1.)*sqrt(1.-x*x);
case 11 : return x*(1.-x*x);
case 12 : return (3.*x*x-1.)*(3.*x*x-1.);
case 13 : return x*(3.*x*x-1.)*sqrt(1.-x*x);
case 14 : return x*x*(1.-x*x);
case 15 : return (1.-x*x)*sqrt(1.-x*x);
case 16 : return (3.*x*x-1.)*(1.-x*x);
case 17 : return x*(1.-x*x)*sqrt(1.-x*x);
case 18 : return (1.-x*x)*(1.-x*x);
}
return 0.;
}
__device__ double gi(double x, int i) {
switch(i) {
case 1 : return 1.;
case 2 : return cos(x);
case 3 : return sin(x);
case 4 : return cos(x)*cos(x);
case 5 : return sin(x)*cos(x);
case 6 : return sin(x)*sin(x);
case 7 : return cos(2.*x);
case 8 : return sin(2.*x);
case 9 : return cos(x)*cos(2.*x);
case 10 : return cos(x)*sin(2.*x);
case 11 : return sin(x)*cos(2.*x);
case 12 : return sin(x)*sin(2.*x);
case 13 : return cos(2.*x)*cos(2.*x);
case 14 : return sin(2.*x)*cos(2.*x);
case 15 : return sin(2.*x)*sin(2.*x);
}
return 0.;
}
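// fi(x,i) and gi(x,i) are the angular basis functions of the two helicity-angle cosines
// and of the decay-plane angle phi. Per event they are evaluated once and cached in
// fi_cos1_temp / fi_cos2_temp / gi_temp; the accessors below pick the right entry through
// the precomputed index dictionaries fjjphhpindexdict / ghhpindexdict.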
__device__ double fjjphhp_cos1(int j, int jp, int h, int hp, int icat, int iev) {
return fi_cos1_temp[(int) fjjphhpindexdict[j][jp][h][hp]-1][icat][iev];
}
__device__ double fjjphhp_cos2(int j, int jp, int h, int hp, int icat, int iev) {
return fi_cos2_temp[(int) fjjphhpindexdict[j][jp][h][hp]-1][icat][iev];
}
__device__ double ghhp_phi(int h, int hp, int icat, int iev) {
return gi_temp[(int) ghhpindexdict[h][hp]-1][icat][iev];
}
__device__ pycuda::complex<double> Nj1j2hj1pj2php(int j1, int j2, int h, int j1p, int j2p, int hp) {
return Nj1j2hdict(j1,j2,h)*pycuda::conj(Nj1j2hdict(j1p,j2p,hp));
}
// ##########################################
// Mass dependent terms
__device__ double get_q(double M, double ma, double mb) {
double M2 = M*M;
double m12 = ma*ma;
double m22 = mb*mb;
double q2 = .25*( M2*M2 - 2*M2*(m12+m22) +(m12*m12+m22*m22)-2*m12*m22) /M2;
if (q2<0) {return 0.;}
return sqrt(q2);
}
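// get_q returns the two-body breakup momentum of a state of mass M decaying to masses
// ma and mb; the expression is the usual Kallen form,
// q = sqrt( (M^2-(ma+mb)^2)*(M^2-(ma-mb)^2) ) / (2*M), clipped to zero below threshold.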
__device__ double Blatt_Weisskopf2(double q, double q0, int L) {
if (L<1.) {return 1.;}
double d = 1.6e-03;
double z = q*d*q*d;
double z0 = q0*d*q0*d;
if (L==1) {return (1+z0)/(1+z);}
else if (L==2) {return ((z0-3)*(z0-3) + 9*z0) / ((z-3)*(z-3) + 9*z);}
else if (L==3) {return (z0*(z0-15)*(z0-15) + 9*(z0-5)) / (z*(z-15)*(z-15) + 9*(z-5));}
return ( pow(z0*z0 -45*z0+105,2) +25*z0*(2*z0-21)*(2*z0-21)) /(pow(z*z -45*z+105,2) +25*z*(2*z-21)*(2*z-21));
}
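// Blatt_Weisskopf2 returns the *squared* Blatt-Weisskopf barrier-factor ratio B_L(q,q0)^2
// for orbital angular momentum L, with z = (q*d)^2 and a fixed radius d = 1.6e-3 /MeV
// (about 1.6 /GeV); L = 0 gives 1 and the final return handles L >= 4.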
__device__ double FL_j1j2(int j1, int j2, double ma, double mb) {
double p = get_q(MBs,ma,mb);
double q_1 = get_q(ma,MPion,MKaon);
double q_2 = get_q(mb,MPion,MKaon);
double m0 = 895.81;
double p0 = get_q(MBs,m0,m0);
double q0 = get_q(m0,MPion,MKaon);
//int L;
//if (j1*j2>0) {L = abs(j1-j2)+1;}
//else {L = abs(j1-j2);}
int L = abs(j1-j2);
double FL_Bs = pow(p/p0,L)*sqrt(Blatt_Weisskopf2(p,p0,L));
double FL_Kpi1 = pow((q_1/q0),j1)*sqrt(Blatt_Weisskopf2(q_1,q0,j1));
double FL_Kpi2 = pow((q_2/q0),j2)*sqrt(Blatt_Weisskopf2(q_2,q0,j2));
return FL_Bs*FL_Kpi1*FL_Kpi2;
}
__device__ pycuda::complex<double> Resonance(double m, double m0, double g0, int J) {
double q = get_q(m,MPion,MKaon);
double q0 = get_q(m0,MPion,MKaon);
double gamma = g0*pow(q/q0,2*J+1)*(m0/m)*Blatt_Weisskopf2(q,q0,J);
pycuda::complex<double> num(m0*g0,0.);
pycuda::complex<double> denom(m0*m0-m*m,-m0*gamma);
pycuda::complex<double> BW = num/denom;
pycuda::complex<double> I(0.,1.);
if (J == 1) {return BW*exp(-I*1.5707963267948966);}
else if (J == 2) {return BW*exp(-I*0.006008360479292941);}
return BW;
}
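// Resonance builds a relativistic Breit-Wigner with the mass-dependent width
// gamma(m) = g0*(q/q0)^(2J+1)*(m0/m)*B_J(q,q0)^2. The fixed phase rotations applied for
// J = 1 and J = 2 look like convention choices aligning each wave's phase at its
// reference point; any constant phase cancels in the normalised Mji below.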
__device__ double omega_Stheo(double m) {
double m_GeV = m/1000.;
double svar_GeV = m_GeV*m_GeV;
double Delta_Kpi = MKaon/1000.*MKaon/1000.-MPion/1000.*MPion/1000.;
double y_s = pow((svar_GeV-Delta_Kpi)/(svar_GeV+Delta_Kpi),2);
double y_s0 = pow((s0_Stheo-Delta_Kpi)/(s0_Stheo+Delta_Kpi),2);
return (sqrt(y_s)-alpha_Stheo*sqrt(y_s0-y_s))/(sqrt(y_s)+alpha_Stheo*sqrt(y_s0-y_s));
}
__device__ double cotdelta_Stheo(double m) {
double m_GeV = m/1000.;
double svar_GeV = m_GeV*m_GeV;
double q_Kpi_GeV = get_q(m,MKaon,MPion)/1000.;
double omega_func = omega_Stheo(m);
return m_GeV/(2.*q_Kpi_GeV*(svar_GeV-sAdler_Stheo))*(B0_Stheo+B1_Stheo*omega_func);
}
__device__ double get_p1_Stheo(double q) {
return 1.+a_Stheo*q/1000.*q/1000.+b_Stheo*q/1000.*q/1000.*q/1000.*q/1000.;
}
__device__ double get_p2_Stheo(double q) {
return 1.+c_Stheo*q/1000.*q/1000.;
}
__device__ double Theta_Keta(double m) {
if (m>=(MKaon+MEta)) {return 1.;}
return 0.;
}
__device__ pycuda::complex<double> Prop_Stheo(double m) {
pycuda::complex<double> I(0.,1.);
double m_GeV = m/1000.;
double svar_GeV = m_GeV*m_GeV;
double q_Kpi_GeV = get_q(m,MKaon,MPion)/1000.;
double q_Keta_GeV = get_q(m,MKaon,MEta)/1000.;
pycuda::complex<double> T;
if (m<(MKaon+MEta)) {
T = 1./(cotdelta_Stheo(m)-I);
}
else {
pycuda::complex<double> S0b = exp(I*2.*q_Keta_GeV*(phi0_Stheo+phi1_Stheo*q_Keta_GeV*q_Keta_GeV));
double q_Kpi_r1_GeV = get_q(sqrtsr1_Stheo*1000.,MKaon,MPion)/1000.;
double q_Keta_r1_GeV = get_q(sqrtsr1_Stheo*1000.,MKaon,MEta)/1000.;
double q_Kpi_r2_GeV = get_q(sqrtsr2_Stheo*1000.,MKaon,MPion)/1000.;
double q_Keta_r2_GeV = get_q(sqrtsr2_Stheo*1000.,MKaon,MEta)/1000.;
double q_Kpi_hat_GeV = get_q(MKaon+MEta,MKaon,MPion)/1000.;
double beta_Stheo = 1./cotdelta_Stheo(MKaon+MEta);
double P1_s = (sqrtsr1_Stheo*sqrtsr1_Stheo-svar_GeV)*beta_Stheo+e1_Stheo*G1_Stheo*(get_p1_Stheo(q_Kpi_GeV*1000.)*(q_Kpi_GeV-q_Kpi_hat_GeV))/(get_p1_Stheo(q_Kpi_r1_GeV*1000.)*(q_Kpi_r1_GeV-q_Kpi_hat_GeV));
double Q1_s = (1.-e1_Stheo)*G1_Stheo*get_p1_Stheo(q_Kpi_GeV*1000.)/get_p1_Stheo(q_Kpi_r1_GeV*1000.)*q_Keta_GeV/q_Keta_r1_GeV*Theta_Keta(m);
double P2_s = e2_Stheo*G2_Stheo*(get_p2_Stheo(q_Kpi_GeV*1000.)*(q_Kpi_GeV-q_Kpi_hat_GeV))/(get_p2_Stheo(q_Kpi_r2_GeV*1000.)*(q_Kpi_r2_GeV-q_Kpi_hat_GeV));
double Q2_s = (1.-e2_Stheo)*G2_Stheo*get_p2_Stheo(q_Kpi_GeV*1000.)/get_p2_Stheo(q_Kpi_r2_GeV*1000.)*q_Keta_GeV/q_Keta_r2_GeV*Theta_Keta(m);
pycuda::complex<double> S1r = (sqrtsr1_Stheo*sqrtsr1_Stheo-svar_GeV+I*(P1_s-Q1_s))/(sqrtsr1_Stheo*sqrtsr1_Stheo-svar_GeV-I*(P1_s+Q1_s));
pycuda::complex<double> S2r = (sqrtsr2_Stheo*sqrtsr2_Stheo-svar_GeV+I*(P2_s-Q2_s))/(sqrtsr2_Stheo*sqrtsr2_Stheo-svar_GeV-I*(P2_s+Q2_s));
T = (S0b*S1r*S2r-1.)/(2.*I);
}
double xm = (m-1175.)/425.;
double modulus = 1.+c1_mass_swave*xm+c2_mass_swave*(2.*xm*xm-1.)+c3_mass_swave*(4.*xm*xm*xm-3.*xm)+c4_mass_swave*(8.*xm*xm*xm*xm-8.*xm*xm+1.);
return pycuda::complex<double>(modulus)*exp(I*(pycuda::arg(T)-0.7095863518296103));
}
__device__ pycuda::complex<double> Prop_ModInd(double m) {
double xm = (m-1175.)/425.;
double re_T = 1.+c1_mass_swave*xm+c2_mass_swave*(2.*xm*xm-1.)+c3_mass_swave*(4.*xm*xm*xm-3.*xm)+c4_mass_swave*(8.*xm*xm*xm*xm-8.*xm*xm+1.);
double im_T = c5_mass_swave+c6_mass_swave*xm+c7_mass_swave*(2.*xm*xm-1.)+c8_mass_swave*(4.*xm*xm*xm-3.*xm)+c9_mass_swave*(8.*xm*xm*xm*xm-8.*xm*xm+1.);
pycuda::complex<double> T(re_T,im_T);
return T;
}
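// The modulus (and, in Prop_ModInd, also the imaginary part) of the S-wave line shape is
// parameterised as a Chebyshev series T_0..T_4 in the reduced mass x = (m-1175)/425 with
// coefficients c1..c9_mass_swave. A minimal illustrative helper making that structure
// explicit (not called anywhere in this file; the name is chosen here for illustration
// only):
__device__ double swave_modulus_sketch(double m) {
double x = (m-1175.)/425.;                 // map the Kpi mass onto roughly [-1,1]
double T1 = x;                             // Chebyshev polynomials of the first kind
double T2 = 2.*x*x-1.;
double T3 = 4.*x*x*x-3.*x;
double T4 = 8.*x*x*x*x-8.*x*x+1.;
return 1. + c1_mass_swave*T1 + c2_mass_swave*T2 + c3_mass_swave*T3 + c4_mass_swave*T4;
}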
__device__ pycuda::complex<double> Prop_S_Palano(double m) {
pycuda::complex<double> i(0.,1.);
double m_GeV = m/1000.;
double svar_GeV = m_GeV*m_GeV;
double q_Kpi_GeV = get_q(m,MKaon,MPion)/1000.;
double q_Keta_GeV = get_q(m,MKaon,MEta)/1000.;
double rho_1 = 2.*q_Kpi_GeV/m_GeV;
double rho_2 = 2.*q_Keta_GeV/m_GeV;
double sbot_GeV = 0.36;
double stop_GeV = 5.832;
double X = (2.*svar_GeV-(stop_GeV+sbot_GeV))/(stop_GeV-sbot_GeV);
double K11 = (svar_GeV-s_A_palano)/s_Kpi_palano*(g_1_a_palano*g_1_a_palano/(svar_GeV-s_a_palano)+g_1_b_palano*g_1_b_palano/(svar_GeV-s_b_palano)+C_11_0_palano+C_11_1_palano*X+C_11_2_palano*X*X+C_11_3_palano*X*X*X);
double K12 = (svar_GeV-s_A_palano)/s_Kpi_palano*(g_1_a_palano*g_2_a_palano/(svar_GeV-s_a_palano)+g_1_b_palano*g_2_b_palano/(svar_GeV-s_b_palano)+C_12_0_palano+C_12_1_palano*X+C_12_2_palano*X*X+C_12_3_palano*X*X*X);
double K22 = (svar_GeV-s_A_palano)/s_Kpi_palano*(g_2_a_palano*g_2_a_palano/(svar_GeV-s_a_palano)+g_2_b_palano*g_2_b_palano/(svar_GeV-s_b_palano)+C_22_0_palano+C_22_1_palano*X+C_22_2_palano*X*X+C_22_3_palano*X*X*X);
double detK = K11*K22-K12*K12;
pycuda::complex<double> Delta(1.-rho_1*rho_2*detK,-rho_1*K11-rho_2*K22);
pycuda::complex<double> T11_hat = s_Kpi_palano/(svar_GeV-s_A_palano)*(K11-rho_2*detK)/Delta;
pycuda::complex<double> T12_hat = s_Kpi_palano/(svar_GeV-s_A_palano)*K12/Delta;
double xm = X;//(m-1175.)/425.;
double alpha_1_s = 1.+c1_mass_swave*xm+c2_mass_swave*(2.*xm*xm-1.)+c3_mass_swave*(4.*xm*xm*xm-3.*xm)+c4_mass_swave*(8.*xm*xm*xm*xm-8.*xm*xm+1.);
double alpha_2_s = c5_mass_swave+c6_mass_swave*xm+c7_mass_swave*(2.*xm*xm-1.)+c8_mass_swave*(4.*xm*xm*xm-3.*xm)+c9_mass_swave*(8.*xm*xm*xm*xm-8.*xm*xm+1.);
pycuda::complex<double> T = alpha_1_s*T11_hat+alpha_2_s*T12_hat;
return T*exp(i*3.06573);
}
__device__ pycuda::complex<double> Prop_Lass(double m) {
pycuda::complex<double> i(0,1);
double a_lass_ = 1./c1_mass_swave;
double r_lass_ = c2_mass_swave;
double m0_ = c3_mass_swave;
double g0_ = c4_mass_swave;
double q = get_q(m,MPion,MKaon);
double q0 = get_q(m0_,MPion,MKaon);
double cotg_deltaB = 1./(a_lass_*q)+0.5*r_lass_*q;
double deltaB = atan(1./cotg_deltaB);
pycuda::complex<double> expo = exp(i*2.*deltaB);
double gamma = g0_*(q/q0)*(m0_/m);
double cotg_deltaR = (m0_*m0_-m*m)/(m0_*gamma);
pycuda::complex<double> T = 1./(cotg_deltaB-i)+expo/(cotg_deltaR-i);
return T;
}
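// Mji selects the mass propagator for one Kpi pair in wave ji: ji = 0 uses the S-wave
// parameterisation Prop_Stheo, ji = 1 a vector Breit-Wigner at (mv,gv), ji = 2 a tensor
// Breit-Wigner at (mt,gt). Each is rotated so that its phase vanishes at the reference
// mass mv, presumably so that relative phases are carried by the fitted amplitudes.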
__device__ pycuda::complex<double> Mji(double m, int ji) {
pycuda::complex<double> i(0,1);
pycuda::complex<double> T;
if (ji == 0)
{
T = Prop_Stheo(m)*exp(-i*pycuda::arg(Prop_Stheo(mv)));
}
else if (ji == 1)
{
T = Resonance(m,mv,gv,1)*exp(-i*pycuda::arg(Resonance(mv,mv,gv,1)));
//T = (Resonance(m,mv,gv,1)+pycuda::complex<double>(c5_mass_swave,c6_mass_swave)*Resonance(m,MKst_1_1410,GKst_1_1410,1)+pycuda::complex<double>(c7_mass_swave,c8_mass_swave)*Resonance(m,MKst_1_1680,GKst_1_1680,1))*exp(-i*pycuda::arg(Resonance(mv,mv,gv,1)+pycuda::complex<double>(c5_mass_swave,c6_mass_swave)*Resonance(mv,MKst_1_1410,GKst_1_1410,1)+pycuda::complex<double>(c7_mass_swave,c8_mass_swave)*Resonance(mv,MKst_1_1680,GKst_1_1680,1)));
}
else if (ji == 2)
{
T = Resonance(m,mt,gt,2)*exp(-i*pycuda::arg(Resonance(mv,mt,gt,2)));
}
return T;
}
__device__ pycuda::complex<double> Mj1j2(double ma, double mb, int j1, int j2) {
double scale_factor = 1.;
if ((j1 == 0) and (j2 == 0)) {return Mji(ma,0)*Mji(mb,0)*FL_j1j2(0,0,ma,mb)*(scale_factor/sqrt(Im00));}
else if ((j1 == 0) and (j2 == 1)) {return Mji(ma,0)*Mji(mb,1)*FL_j1j2(0,1,ma,mb)*(scale_factor/sqrt(Im01));}
else if ((j1 == 1) and (j2 == 0)) {return Mji(ma,1)*Mji(mb,0)*FL_j1j2(1,0,ma,mb)*(scale_factor/sqrt(Im10));}
else if ((j1 == 0) and (j2 == 2)) {return Mji(ma,0)*Mji(mb,2)*FL_j1j2(0,2,ma,mb)*(scale_factor/sqrt(Im02));}
else if ((j1 == 2) and (j2 == 0)) {return Mji(ma,2)*Mji(mb,0)*FL_j1j2(2,0,ma,mb)*(scale_factor/sqrt(Im20));}
else if ((j1 == 1) and (j2 == 1)) {return Mji(ma,1)*Mji(mb,1)*FL_j1j2(1,1,ma,mb)*(scale_factor/sqrt(Im11));}
else if ((j1 == 1) and (j2 == 2)) {return Mji(ma,1)*Mji(mb,2)*FL_j1j2(1,2,ma,mb)*(scale_factor/sqrt(Im12));}
else if ((j1 == 2) and (j2 == 1)) {return Mji(ma,2)*Mji(mb,1)*FL_j1j2(2,1,ma,mb)*(scale_factor/sqrt(Im21));}
else if ((j1 == 2) and (j2 == 2)) {return Mji(ma,2)*Mji(mb,2)*FL_j1j2(2,2,ma,mb)*(scale_factor/sqrt(Im22));}
return pycuda::complex<double>(0.,0.);
}
__device__ pycuda::complex<double> Mj1j2_unnorm(double ma, double mb, int j1, int j2) {
if ((j1 == 0) and (j2 == 0)) {return Mji(ma,0)*Mji(mb,0)*FL_j1j2(0,0,ma,mb);}
else if ((j1 == 0) and (j2 == 1)) {return Mji(ma,0)*Mji(mb,1)*FL_j1j2(0,1,ma,mb);}
else if ((j1 == 1) and (j2 == 0)) {return Mji(ma,1)*Mji(mb,0)*FL_j1j2(1,0,ma,mb);}
else if ((j1 == 0) and (j2 == 2)) {return Mji(ma,0)*Mji(mb,2)*FL_j1j2(0,2,ma,mb);}
else if ((j1 == 2) and (j2 == 0)) {return Mji(ma,2)*Mji(mb,0)*FL_j1j2(2,0,ma,mb);}
else if ((j1 == 1) and (j2 == 1)) {return Mji(ma,1)*Mji(mb,1)*FL_j1j2(1,1,ma,mb);}
else if ((j1 == 1) and (j2 == 2)) {return Mji(ma,1)*Mji(mb,2)*FL_j1j2(1,2,ma,mb);}
else if ((j1 == 2) and (j2 == 1)) {return Mji(ma,2)*Mji(mb,1)*FL_j1j2(2,1,ma,mb);}
else if ((j1 == 2) and (j2 == 2)) {return Mji(ma,2)*Mji(mb,2)*FL_j1j2(2,2,ma,mb);}
return pycuda::complex<double>(0.,0.);
}
__device__ double phasespace(double ma, double mb) {
double Q1 = get_q(ma,MKaon,MPion);
double Q2 = get_q(mb,MKaon,MPion);
double QB = get_q(MBs,ma,mb);
double phsp = Q1*Q2*QB;
return phsp;
}
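// phasespace is the phase-space density for the four-body final state, written as the
// product of the two Kpi breakup momenta and the Bs breakup momentum; the angular-momentum
// barrier factors are applied separately through FL_j1j2.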
__device__ pycuda::complex<double> hj1j2j1pj2p(int j1, int j2, int j1p, int j2p, int icat, int iev) {
return Mj1j2_temp(j1,j2,icat,iev)*pycuda::conj(Mj1j2_temp(j1p,j2p,icat,iev))*phasespace_temp[icat][iev];
}
// ##########################################
// PDF elements
__device__ double comp_num_fit(int j1, int j2, int h, int j1p, int j2p, int hp, int icat, int iev) {
return pycuda::real(Tj1j2hj1pj2php(j1,j2,h,j1p,j2p,hp,icat,iev)*Nj1j2hj1pj2php(j1,j2,h,j1p,j2p,hp)*hj1j2j1pj2p(j1,j2,j1p,j2p,icat,iev))*ghhp_phi(h,hp,icat,iev)*fjjphhp_cos1(j1,j1p,h,hp,icat,iev)*fjjphhp_cos2(j2,j2p,h,hp,icat,iev);
}
__device__ double comp_den_fit(int j1, int j2, int h, int j1p, int j2p, int hp, int icat, int iev) {
return pycuda::real(ITj1j2hj1pj2php_deltat(j1,j2,h,j1p,j2p,hp,icat,iev)*pycuda::complex<double>(nw_comp(year_opt[icat],trig_opt[icat],j1,j2,h,j1p,j2p,hp,0),nw_comp(year_opt[icat],trig_opt[icat],j1,j2,h,j1p,j2p,hp,1)));
}
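// num_fit sums the full per-event intensity: each diagonal (j1,j2,h) term appears once and
// each interference term twice (the factor 2 accounts for the conjugate pairing, the real
// part being taken inside comp_num_fit). den_fit builds the matching normalisation from
// the per-category normalisation weights nw_comp and the time integrals.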
__device__ double num_fit(int icat, int iev) {
return comp_num_fit(0,0,0,0,0,0,icat,iev)+comp_num_fit(0,1,0,0,1,0,icat,iev)+comp_num_fit(0,2,0,0,2,0,icat,iev)+comp_num_fit(1,0,0,1,0,0,icat,iev)+comp_num_fit(1,1,0,1,1,0,icat,iev)+comp_num_fit(1,1,1,1,1,1,icat,iev)
+comp_num_fit(1,1,2,1,1,2,icat,iev)+comp_num_fit(1,2,0,1,2,0,icat,iev)+comp_num_fit(1,2,1,1,2,1,icat,iev)+comp_num_fit(1,2,2,1,2,2,icat,iev)+comp_num_fit(2,0,0,2,0,0,icat,iev)+comp_num_fit(2,1,0,2,1,0,icat,iev)+comp_num_fit(2,1,1,2,1,1,icat,iev)
+comp_num_fit(2,1,2,2,1,2,icat,iev)+comp_num_fit(2,2,0,2,2,0,icat,iev)+comp_num_fit(2,2,1,2,2,1,icat,iev)+comp_num_fit(2,2,2,2,2,2,icat,iev)+comp_num_fit(2,2,3,2,2,3,icat,iev)+comp_num_fit(2,2,4,2,2,4,icat,iev)+2.*comp_num_fit(0,1,0,0,0,0,icat,iev)
+2.*comp_num_fit(0,1,0,1,0,0,icat,iev)+2.*comp_num_fit(0,1,0,2,0,0,icat,iev)+2.*comp_num_fit(0,2,0,0,0,0,icat,iev)+2.*comp_num_fit(0,2,0,0,1,0,icat,iev)+2.*comp_num_fit(0,2,0,1,0,0,icat,iev)+2.*comp_num_fit(0,2,0,1,1,0,icat,iev)
+2.*comp_num_fit(0,2,0,2,0,0,icat,iev)+2.*comp_num_fit(0,2,0,2,1,0,icat,iev)+2.*comp_num_fit(1,0,0,0,0,0,icat,iev)+2.*comp_num_fit(1,1,0,0,0,0,icat,iev)+2.*comp_num_fit(1,1,0,0,1,0,icat,iev)+2.*comp_num_fit(1,1,0,1,0,0,icat,iev)
+2.*comp_num_fit(1,1,0,2,0,0,icat,iev)+2.*comp_num_fit(1,1,1,0,0,0,icat,iev)+2.*comp_num_fit(1,1,1,0,1,0,icat,iev)+2.*comp_num_fit(1,1,1,0,2,0,icat,iev)+2.*comp_num_fit(1,1,1,1,0,0,icat,iev)+2.*comp_num_fit(1,1,1,1,1,0,icat,iev)
+2.*comp_num_fit(1,1,1,1,2,0,icat,iev)+2.*comp_num_fit(1,1,1,2,0,0,icat,iev)+2.*comp_num_fit(1,1,1,2,1,0,icat,iev)+2.*comp_num_fit(1,1,1,2,2,0,icat,iev)+2.*comp_num_fit(1,1,2,0,0,0,icat,iev)+2.*comp_num_fit(1,1,2,0,1,0,icat,iev)
+2.*comp_num_fit(1,1,2,0,2,0,icat,iev)+2.*comp_num_fit(1,1,2,1,0,0,icat,iev)+2.*comp_num_fit(1,1,2,1,1,0,icat,iev)+2.*comp_num_fit(1,1,2,1,1,1,icat,iev)+2.*comp_num_fit(1,1,2,1,2,0,icat,iev)+2.*comp_num_fit(1,1,2,1,2,1,icat,iev)
+2.*comp_num_fit(1,1,2,2,0,0,icat,iev)+2.*comp_num_fit(1,1,2,2,1,0,icat,iev)+2.*comp_num_fit(1,1,2,2,1,1,icat,iev)+2.*comp_num_fit(1,1,2,2,2,0,icat,iev)+2.*comp_num_fit(1,1,2,2,2,1,icat,iev)+2.*comp_num_fit(1,2,0,0,0,0,icat,iev)
+2.*comp_num_fit(1,2,0,0,1,0,icat,iev)+2.*comp_num_fit(1,2,0,0,2,0,icat,iev)+2.*comp_num_fit(1,2,0,1,0,0,icat,iev)+2.*comp_num_fit(1,2,0,1,1,0,icat,iev)+2.*comp_num_fit(1,2,0,2,0,0,icat,iev)+2.*comp_num_fit(1,2,0,2,1,0,icat,iev)
+2.*comp_num_fit(1,2,1,0,0,0,icat,iev)+2.*comp_num_fit(1,2,1,0,1,0,icat,iev)+2.*comp_num_fit(1,2,1,0,2,0,icat,iev)+2.*comp_num_fit(1,2,1,1,0,0,icat,iev)+2.*comp_num_fit(1,2,1,1,1,0,icat,iev)+2.*comp_num_fit(1,2,1,1,1,1,icat,iev)
+2.*comp_num_fit(1,2,1,1,2,0,icat,iev)+2.*comp_num_fit(1,2,1,2,0,0,icat,iev)+2.*comp_num_fit(1,2,1,2,1,0,icat,iev)+2.*comp_num_fit(1,2,1,2,1,1,icat,iev)+2.*comp_num_fit(1,2,1,2,2,0,icat,iev)+2.*comp_num_fit(1,2,2,0,0,0,icat,iev)
+2.*comp_num_fit(1,2,2,0,1,0,icat,iev)+2.*comp_num_fit(1,2,2,0,2,0,icat,iev)+2.*comp_num_fit(1,2,2,1,0,0,icat,iev)+2.*comp_num_fit(1,2,2,1,1,0,icat,iev)+2.*comp_num_fit(1,2,2,1,1,1,icat,iev)+2.*comp_num_fit(1,2,2,1,1,2,icat,iev)
+2.*comp_num_fit(1,2,2,1,2,0,icat,iev)+2.*comp_num_fit(1,2,2,1,2,1,icat,iev)+2.*comp_num_fit(1,2,2,2,0,0,icat,iev)+2.*comp_num_fit(1,2,2,2,1,0,icat,iev)+2.*comp_num_fit(1,2,2,2,1,1,icat,iev)+2.*comp_num_fit(1,2,2,2,1,2,icat,iev)
+2.*comp_num_fit(1,2,2,2,2,0,icat,iev)+2.*comp_num_fit(1,2,2,2,2,1,icat,iev)+2.*comp_num_fit(2,0,0,0,0,0,icat,iev)+2.*comp_num_fit(2,0,0,1,0,0,icat,iev)+2.*comp_num_fit(2,1,0,0,0,0,icat,iev)+2.*comp_num_fit(2,1,0,0,1,0,icat,iev)
+2.*comp_num_fit(2,1,0,1,0,0,icat,iev)+2.*comp_num_fit(2,1,0,1,1,0,icat,iev)+2.*comp_num_fit(2,1,0,2,0,0,icat,iev)+2.*comp_num_fit(2,1,1,0,0,0,icat,iev)+2.*comp_num_fit(2,1,1,0,1,0,icat,iev)+2.*comp_num_fit(2,1,1,0,2,0,icat,iev)
+2.*comp_num_fit(2,1,1,1,0,0,icat,iev)+2.*comp_num_fit(2,1,1,1,1,0,icat,iev)+2.*comp_num_fit(2,1,1,1,1,1,icat,iev)+2.*comp_num_fit(2,1,1,1,2,0,icat,iev)+2.*comp_num_fit(2,1,1,2,0,0,icat,iev)+2.*comp_num_fit(2,1,1,2,1,0,icat,iev)
+2.*comp_num_fit(2,1,1,2,2,0,icat,iev)+2.*comp_num_fit(2,1,2,0,0,0,icat,iev)+2.*comp_num_fit(2,1,2,0,1,0,icat,iev)+2.*comp_num_fit(2,1,2,0,2,0,icat,iev)+2.*comp_num_fit(2,1,2,1,0,0,icat,iev)+2.*comp_num_fit(2,1,2,1,1,0,icat,iev)
+2.*comp_num_fit(2,1,2,1,1,1,icat,iev)+2.*comp_num_fit(2,1,2,1,1,2,icat,iev)+2.*comp_num_fit(2,1,2,1,2,0,icat,iev)+2.*comp_num_fit(2,1,2,1,2,1,icat,iev)+2.*comp_num_fit(2,1,2,2,0,0,icat,iev)+2.*comp_num_fit(2,1,2,2,1,0,icat,iev)
+2.*comp_num_fit(2,1,2,2,1,1,icat,iev)+2.*comp_num_fit(2,1,2,2,2,0,icat,iev)+2.*comp_num_fit(2,1,2,2,2,1,icat,iev)+2.*comp_num_fit(2,2,0,0,0,0,icat,iev)+2.*comp_num_fit(2,2,0,0,1,0,icat,iev)+2.*comp_num_fit(2,2,0,0,2,0,icat,iev)
+2.*comp_num_fit(2,2,0,1,0,0,icat,iev)+2.*comp_num_fit(2,2,0,1,1,0,icat,iev)+2.*comp_num_fit(2,2,0,1,2,0,icat,iev)+2.*comp_num_fit(2,2,0,2,0,0,icat,iev)+2.*comp_num_fit(2,2,0,2,1,0,icat,iev)+2.*comp_num_fit(2,2,1,0,0,0,icat,iev)
+2.*comp_num_fit(2,2,1,0,1,0,icat,iev)+2.*comp_num_fit(2,2,1,0,2,0,icat,iev)+2.*comp_num_fit(2,2,1,1,0,0,icat,iev)+2.*comp_num_fit(2,2,1,1,1,0,icat,iev)+2.*comp_num_fit(2,2,1,1,1,1,icat,iev)+2.*comp_num_fit(2,2,1,1,2,0,icat,iev)
+2.*comp_num_fit(2,2,1,1,2,1,icat,iev)+2.*comp_num_fit(2,2,1,2,0,0,icat,iev)+2.*comp_num_fit(2,2,1,2,1,0,icat,iev)+2.*comp_num_fit(2,2,1,2,1,1,icat,iev)+2.*comp_num_fit(2,2,1,2,2,0,icat,iev)+2.*comp_num_fit(2,2,2,0,0,0,icat,iev)
+2.*comp_num_fit(2,2,2,0,1,0,icat,iev)+2.*comp_num_fit(2,2,2,0,2,0,icat,iev)+2.*comp_num_fit(2,2,2,1,0,0,icat,iev)+2.*comp_num_fit(2,2,2,1,1,0,icat,iev)+2.*comp_num_fit(2,2,2,1,1,1,icat,iev)+2.*comp_num_fit(2,2,2,1,1,2,icat,iev)
+2.*comp_num_fit(2,2,2,1,2,0,icat,iev)+2.*comp_num_fit(2,2,2,1,2,1,icat,iev)+2.*comp_num_fit(2,2,2,1,2,2,icat,iev)+2.*comp_num_fit(2,2,2,2,0,0,icat,iev)+2.*comp_num_fit(2,2,2,2,1,0,icat,iev)+2.*comp_num_fit(2,2,2,2,1,1,icat,iev)
+2.*comp_num_fit(2,2,2,2,1,2,icat,iev)+2.*comp_num_fit(2,2,2,2,2,0,icat,iev)+2.*comp_num_fit(2,2,2,2,2,1,icat,iev)+2.*comp_num_fit(2,2,3,0,0,0,icat,iev)+2.*comp_num_fit(2,2,3,0,1,0,icat,iev)+2.*comp_num_fit(2,2,3,0,2,0,icat,iev)
+2.*comp_num_fit(2,2,3,1,0,0,icat,iev)+2.*comp_num_fit(2,2,3,1,1,0,icat,iev)+2.*comp_num_fit(2,2,3,1,1,1,icat,iev)+2.*comp_num_fit(2,2,3,1,1,2,icat,iev)+2.*comp_num_fit(2,2,3,1,2,0,icat,iev)+2.*comp_num_fit(2,2,3,1,2,1,icat,iev)
+2.*comp_num_fit(2,2,3,1,2,2,icat,iev)+2.*comp_num_fit(2,2,3,2,0,0,icat,iev)+2.*comp_num_fit(2,2,3,2,1,0,icat,iev)+2.*comp_num_fit(2,2,3,2,1,1,icat,iev)+2.*comp_num_fit(2,2,3,2,1,2,icat,iev)+2.*comp_num_fit(2,2,3,2,2,0,icat,iev)
+2.*comp_num_fit(2,2,3,2,2,1,icat,iev)+2.*comp_num_fit(2,2,3,2,2,2,icat,iev)+2.*comp_num_fit(2,2,4,0,0,0,icat,iev)+2.*comp_num_fit(2,2,4,0,1,0,icat,iev)+2.*comp_num_fit(2,2,4,0,2,0,icat,iev)+2.*comp_num_fit(2,2,4,1,0,0,icat,iev)
+2.*comp_num_fit(2,2,4,1,1,0,icat,iev)+2.*comp_num_fit(2,2,4,1,1,1,icat,iev)+2.*comp_num_fit(2,2,4,1,1,2,icat,iev)+2.*comp_num_fit(2,2,4,1,2,0,icat,iev)+2.*comp_num_fit(2,2,4,1,2,1,icat,iev)+2.*comp_num_fit(2,2,4,1,2,2,icat,iev)
+2.*comp_num_fit(2,2,4,2,0,0,icat,iev)+2.*comp_num_fit(2,2,4,2,1,0,icat,iev)+2.*comp_num_fit(2,2,4,2,1,1,icat,iev)+2.*comp_num_fit(2,2,4,2,1,2,icat,iev)+2.*comp_num_fit(2,2,4,2,2,0,icat,iev)+2.*comp_num_fit(2,2,4,2,2,1,icat,iev)
+2.*comp_num_fit(2,2,4,2,2,2,icat,iev)+2.*comp_num_fit(2,2,4,2,2,3,icat,iev);
}
__device__ double den_fit(int icat, int iev) {
return comp_den_fit(0,0,0,0,0,0,icat,iev)+comp_den_fit(0,1,0,0,1,0,icat,iev)+comp_den_fit(0,2,0,0,2,0,icat,iev)+comp_den_fit(1,0,0,1,0,0,icat,iev)+comp_den_fit(1,1,0,1,1,0,icat,iev)+comp_den_fit(1,1,1,1,1,1,icat,iev)
+comp_den_fit(1,1,2,1,1,2,icat,iev)+comp_den_fit(1,2,0,1,2,0,icat,iev)+comp_den_fit(1,2,1,1,2,1,icat,iev)+comp_den_fit(1,2,2,1,2,2,icat,iev)+comp_den_fit(2,0,0,2,0,0,icat,iev)+comp_den_fit(2,1,0,2,1,0,icat,iev)+comp_den_fit(2,1,1,2,1,1,icat,iev)
+comp_den_fit(2,1,2,2,1,2,icat,iev)+comp_den_fit(2,2,0,2,2,0,icat,iev)+comp_den_fit(2,2,1,2,2,1,icat,iev)+comp_den_fit(2,2,2,2,2,2,icat,iev)+comp_den_fit(2,2,3,2,2,3,icat,iev)+comp_den_fit(2,2,4,2,2,4,icat,iev)+2.*comp_den_fit(0,1,0,0,0,0,icat,iev)
+2.*comp_den_fit(0,1,0,1,0,0,icat,iev)+2.*comp_den_fit(0,1,0,2,0,0,icat,iev)+2.*comp_den_fit(0,2,0,0,0,0,icat,iev)+2.*comp_den_fit(0,2,0,0,1,0,icat,iev)+2.*comp_den_fit(0,2,0,1,0,0,icat,iev)+2.*comp_den_fit(0,2,0,1,1,0,icat,iev)
+2.*comp_den_fit(0,2,0,2,0,0,icat,iev)+2.*comp_den_fit(0,2,0,2,1,0,icat,iev)+2.*comp_den_fit(1,0,0,0,0,0,icat,iev)+2.*comp_den_fit(1,1,0,0,0,0,icat,iev)+2.*comp_den_fit(1,1,0,0,1,0,icat,iev)+2.*comp_den_fit(1,1,0,1,0,0,icat,iev)
+2.*comp_den_fit(1,1,0,2,0,0,icat,iev)+2.*comp_den_fit(1,1,1,0,0,0,icat,iev)+2.*comp_den_fit(1,1,1,0,1,0,icat,iev)+2.*comp_den_fit(1,1,1,0,2,0,icat,iev)+2.*comp_den_fit(1,1,1,1,0,0,icat,iev)+2.*comp_den_fit(1,1,1,1,1,0,icat,iev)
+2.*comp_den_fit(1,1,1,1,2,0,icat,iev)+2.*comp_den_fit(1,1,1,2,0,0,icat,iev)+2.*comp_den_fit(1,1,1,2,1,0,icat,iev)+2.*comp_den_fit(1,1,1,2,2,0,icat,iev)+2.*comp_den_fit(1,1,2,0,0,0,icat,iev)+2.*comp_den_fit(1,1,2,0,1,0,icat,iev)
+2.*comp_den_fit(1,1,2,0,2,0,icat,iev)+2.*comp_den_fit(1,1,2,1,0,0,icat,iev)+2.*comp_den_fit(1,1,2,1,1,0,icat,iev)+2.*comp_den_fit(1,1,2,1,1,1,icat,iev)+2.*comp_den_fit(1,1,2,1,2,0,icat,iev)+2.*comp_den_fit(1,1,2,1,2,1,icat,iev)
+2.*comp_den_fit(1,1,2,2,0,0,icat,iev)+2.*comp_den_fit(1,1,2,2,1,0,icat,iev)+2.*comp_den_fit(1,1,2,2,1,1,icat,iev)+2.*comp_den_fit(1,1,2,2,2,0,icat,iev)+2.*comp_den_fit(1,1,2,2,2,1,icat,iev)+2.*comp_den_fit(1,2,0,0,0,0,icat,iev)
+2.*comp_den_fit(1,2,0,0,1,0,icat,iev)+2.*comp_den_fit(1,2,0,0,2,0,icat,iev)+2.*comp_den_fit(1,2,0,1,0,0,icat,iev)+2.*comp_den_fit(1,2,0,1,1,0,icat,iev)+2.*comp_den_fit(1,2,0,2,0,0,icat,iev)+2.*comp_den_fit(1,2,0,2,1,0,icat,iev)
+2.*comp_den_fit(1,2,1,0,0,0,icat,iev)+2.*comp_den_fit(1,2,1,0,1,0,icat,iev)+2.*comp_den_fit(1,2,1,0,2,0,icat,iev)+2.*comp_den_fit(1,2,1,1,0,0,icat,iev)+2.*comp_den_fit(1,2,1,1,1,0,icat,iev)+2.*comp_den_fit(1,2,1,1,1,1,icat,iev)
+2.*comp_den_fit(1,2,1,1,2,0,icat,iev)+2.*comp_den_fit(1,2,1,2,0,0,icat,iev)+2.*comp_den_fit(1,2,1,2,1,0,icat,iev)+2.*comp_den_fit(1,2,1,2,1,1,icat,iev)+2.*comp_den_fit(1,2,1,2,2,0,icat,iev)+2.*comp_den_fit(1,2,2,0,0,0,icat,iev)
+2.*comp_den_fit(1,2,2,0,1,0,icat,iev)+2.*comp_den_fit(1,2,2,0,2,0,icat,iev)+2.*comp_den_fit(1,2,2,1,0,0,icat,iev)+2.*comp_den_fit(1,2,2,1,1,0,icat,iev)+2.*comp_den_fit(1,2,2,1,1,1,icat,iev)+2.*comp_den_fit(1,2,2,1,1,2,icat,iev)
+2.*comp_den_fit(1,2,2,1,2,0,icat,iev)+2.*comp_den_fit(1,2,2,1,2,1,icat,iev)+2.*comp_den_fit(1,2,2,2,0,0,icat,iev)+2.*comp_den_fit(1,2,2,2,1,0,icat,iev)+2.*comp_den_fit(1,2,2,2,1,1,icat,iev)+2.*comp_den_fit(1,2,2,2,1,2,icat,iev)
+2.*comp_den_fit(1,2,2,2,2,0,icat,iev)+2.*comp_den_fit(1,2,2,2,2,1,icat,iev)+2.*comp_den_fit(2,0,0,0,0,0,icat,iev)+2.*comp_den_fit(2,0,0,1,0,0,icat,iev)+2.*comp_den_fit(2,1,0,0,0,0,icat,iev)+2.*comp_den_fit(2,1,0,0,1,0,icat,iev)
+2.*comp_den_fit(2,1,0,1,0,0,icat,iev)+2.*comp_den_fit(2,1,0,1,1,0,icat,iev)+2.*comp_den_fit(2,1,0,2,0,0,icat,iev)+2.*comp_den_fit(2,1,1,0,0,0,icat,iev)+2.*comp_den_fit(2,1,1,0,1,0,icat,iev)+2.*comp_den_fit(2,1,1,0,2,0,icat,iev)
+2.*comp_den_fit(2,1,1,1,0,0,icat,iev)+2.*comp_den_fit(2,1,1,1,1,0,icat,iev)+2.*comp_den_fit(2,1,1,1,1,1,icat,iev)+2.*comp_den_fit(2,1,1,1,2,0,icat,iev)+2.*comp_den_fit(2,1,1,2,0,0,icat,iev)+2.*comp_den_fit(2,1,1,2,1,0,icat,iev)
+2.*comp_den_fit(2,1,1,2,2,0,icat,iev)+2.*comp_den_fit(2,1,2,0,0,0,icat,iev)+2.*comp_den_fit(2,1,2,0,1,0,icat,iev)+2.*comp_den_fit(2,1,2,0,2,0,icat,iev)+2.*comp_den_fit(2,1,2,1,0,0,icat,iev)+2.*comp_den_fit(2,1,2,1,1,0,icat,iev)
+2.*comp_den_fit(2,1,2,1,1,1,icat,iev)+2.*comp_den_fit(2,1,2,1,1,2,icat,iev)+2.*comp_den_fit(2,1,2,1,2,0,icat,iev)+2.*comp_den_fit(2,1,2,1,2,1,icat,iev)+2.*comp_den_fit(2,1,2,2,0,0,icat,iev)+2.*comp_den_fit(2,1,2,2,1,0,icat,iev)
+2.*comp_den_fit(2,1,2,2,1,1,icat,iev)+2.*comp_den_fit(2,1,2,2,2,0,icat,iev)+2.*comp_den_fit(2,1,2,2,2,1,icat,iev)+2.*comp_den_fit(2,2,0,0,0,0,icat,iev)+2.*comp_den_fit(2,2,0,0,1,0,icat,iev)+2.*comp_den_fit(2,2,0,0,2,0,icat,iev)
+2.*comp_den_fit(2,2,0,1,0,0,icat,iev)+2.*comp_den_fit(2,2,0,1,1,0,icat,iev)+2.*comp_den_fit(2,2,0,1,2,0,icat,iev)+2.*comp_den_fit(2,2,0,2,0,0,icat,iev)+2.*comp_den_fit(2,2,0,2,1,0,icat,iev)+2.*comp_den_fit(2,2,1,0,0,0,icat,iev)
+2.*comp_den_fit(2,2,1,0,1,0,icat,iev)+2.*comp_den_fit(2,2,1,0,2,0,icat,iev)+2.*comp_den_fit(2,2,1,1,0,0,icat,iev)+2.*comp_den_fit(2,2,1,1,1,0,icat,iev)+2.*comp_den_fit(2,2,1,1,1,1,icat,iev)+2.*comp_den_fit(2,2,1,1,2,0,icat,iev)
+2.*comp_den_fit(2,2,1,1,2,1,icat,iev)+2.*comp_den_fit(2,2,1,2,0,0,icat,iev)+2.*comp_den_fit(2,2,1,2,1,0,icat,iev)+2.*comp_den_fit(2,2,1,2,1,1,icat,iev)+2.*comp_den_fit(2,2,1,2,2,0,icat,iev)+2.*comp_den_fit(2,2,2,0,0,0,icat,iev)
+2.*comp_den_fit(2,2,2,0,1,0,icat,iev)+2.*comp_den_fit(2,2,2,0,2,0,icat,iev)+2.*comp_den_fit(2,2,2,1,0,0,icat,iev)+2.*comp_den_fit(2,2,2,1,1,0,icat,iev)+2.*comp_den_fit(2,2,2,1,1,1,icat,iev)+2.*comp_den_fit(2,2,2,1,1,2,icat,iev)
+2.*comp_den_fit(2,2,2,1,2,0,icat,iev)+2.*comp_den_fit(2,2,2,1,2,1,icat,iev)+2.*comp_den_fit(2,2,2,1,2,2,icat,iev)+2.*comp_den_fit(2,2,2,2,0,0,icat,iev)+2.*comp_den_fit(2,2,2,2,1,0,icat,iev)+2.*comp_den_fit(2,2,2,2,1,1,icat,iev)
+2.*comp_den_fit(2,2,2,2,1,2,icat,iev)+2.*comp_den_fit(2,2,2,2,2,0,icat,iev)+2.*comp_den_fit(2,2,2,2,2,1,icat,iev)+2.*comp_den_fit(2,2,3,0,0,0,icat,iev)+2.*comp_den_fit(2,2,3,0,1,0,icat,iev)+2.*comp_den_fit(2,2,3,0,2,0,icat,iev)
+2.*comp_den_fit(2,2,3,1,0,0,icat,iev)+2.*comp_den_fit(2,2,3,1,1,0,icat,iev)+2.*comp_den_fit(2,2,3,1,1,1,icat,iev)+2.*comp_den_fit(2,2,3,1,1,2,icat,iev)+2.*comp_den_fit(2,2,3,1,2,0,icat,iev)+2.*comp_den_fit(2,2,3,1,2,1,icat,iev)
+2.*comp_den_fit(2,2,3,1,2,2,icat,iev)+2.*comp_den_fit(2,2,3,2,0,0,icat,iev)+2.*comp_den_fit(2,2,3,2,1,0,icat,iev)+2.*comp_den_fit(2,2,3,2,1,1,icat,iev)+2.*comp_den_fit(2,2,3,2,1,2,icat,iev)+2.*comp_den_fit(2,2,3,2,2,0,icat,iev)
+2.*comp_den_fit(2,2,3,2,2,1,icat,iev)+2.*comp_den_fit(2,2,3,2,2,2,icat,iev)+2.*comp_den_fit(2,2,4,0,0,0,icat,iev)+2.*comp_den_fit(2,2,4,0,1,0,icat,iev)+2.*comp_den_fit(2,2,4,0,2,0,icat,iev)+2.*comp_den_fit(2,2,4,1,0,0,icat,iev)
+2.*comp_den_fit(2,2,4,1,1,0,icat,iev)+2.*comp_den_fit(2,2,4,1,1,1,icat,iev)+2.*comp_den_fit(2,2,4,1,1,2,icat,iev)+2.*comp_den_fit(2,2,4,1,2,0,icat,iev)+2.*comp_den_fit(2,2,4,1,2,1,icat,iev)+2.*comp_den_fit(2,2,4,1,2,2,icat,iev)
+2.*comp_den_fit(2,2,4,2,0,0,icat,iev)+2.*comp_den_fit(2,2,4,2,1,0,icat,iev)+2.*comp_den_fit(2,2,4,2,1,1,icat,iev)+2.*comp_den_fit(2,2,4,2,1,2,icat,iev)+2.*comp_den_fit(2,2,4,2,2,0,icat,iev)+2.*comp_den_fit(2,2,4,2,2,1,icat,iev)
+2.*comp_den_fit(2,2,4,2,2,2,icat,iev)+2.*comp_den_fit(2,2,4,2,2,3,icat,iev);
}
// ##########################################
// PDF evaluator
__device__ void set_buffer_options(double *options, int icat) {
wide_window = (int) options[0];
year_opt[icat] = (int) options[1];
trig_opt[icat] = (int) options[2];
alt_fit = options[3];
option = options[4];
inftres = options[5];
acctype = options[6];
A_j1 = options[7];
A_j2 = options[8];
A_h = options[9];
A_j1p = options[10];
A_j2p = options[11];
A_hp = options[12];
qcomp = options[13];
}
__device__ void set_buffer_amplitudes(double *re_amps, double *dirCP_asyms, double *im_amps, double *weak_phases, double *mixing_params, double *calib_params) {
reA00 = re_amps[0];
reA01 = re_amps[1];
reA10 = re_amps[2];
reA02 = re_amps[3];
reA20 = re_amps[4];
reA110 = re_amps[5];
reA11par = re_amps[6];
reA11perp = re_amps[7];
reA120 = re_amps[8];
reA12par = re_amps[9];
reA12perp = re_amps[10];
reA210 = re_amps[11];
reA21par = re_amps[12];
reA21perp = re_amps[13];
reA220 = re_amps[14];
reA22par = re_amps[15];
reA22perp = re_amps[16];
reA22par2 = re_amps[17];
reA22perp2 = re_amps[18];
DCP_SS = dirCP_asyms[0];
DCP_SV = dirCP_asyms[1];
DCP_VS = dirCP_asyms[2];
DCP_ST = dirCP_asyms[3];
DCP_TS = dirCP_asyms[4];
DCP = dirCP_asyms[5];
DCP_VT = dirCP_asyms[6];
DCP_TV = dirCP_asyms[7];
DCP_TT = dirCP_asyms[8];
imA00 = im_amps[0];
imA01 = im_amps[1];
imA10 = im_amps[2];
imA02 = im_amps[3];
imA20 = im_amps[4];
imA110 = im_amps[5];
imA11par = im_amps[6];
imA11perp = im_amps[7];
imA120 = im_amps[8];
imA12par = im_amps[9];
imA12perp = im_amps[10];
imA210 = im_amps[11];
imA21par = im_amps[12];
imA21perp = im_amps[13];
imA220 = im_amps[14];
imA22par = im_amps[15];
imA22perp = im_amps[16];
imA22par2 = im_amps[17];
imA22perp2 = im_amps[18];
phis = weak_phases[0];
dphi_SS = weak_phases[1];
dphi_SV = weak_phases[2];
dphi_VS = weak_phases[3];
dphi_ST = weak_phases[4];
dphi_TS = weak_phases[5];
dphi_VT = weak_phases[6];
dphi_TV = weak_phases[7];
dphi_TT = weak_phases[8];
delta_m_freq = mixing_params[0];
gamma_Bs_freq = mixing_params[1];
delta_gamma_freq = mixing_params[2];
p0metac_tag_SSK = calib_params[0];
p0metac_tag_OS = calib_params[1];
Dp0half_tag_SSK = calib_params[2];
Dp0half_tag_OS = calib_params[3];
p1_tag_SSK = calib_params[4];
p1_tag_OS = calib_params[5];
Dp1half_tag_SSK = calib_params[6];
Dp1half_tag_OS = calib_params[7];
etac_tag_SSK = calib_params[8];
etac_tag_OS = calib_params[9];
deltatmean_tres_11 = calib_params[10];
p0_tres_11 = calib_params[11];
p1_tres_11 = calib_params[12];
deltatmean_tres_12 = calib_params[13];
p0_tres_12 = calib_params[14];
p1_tres_12 = calib_params[15];
mv = calib_params[16];
ms = calib_params[17];
mt = calib_params[18];
gv = calib_params[19];
gs = calib_params[20];
gt = calib_params[21];
c1_mass_swave = calib_params[22];
c2_mass_swave = calib_params[23];
c3_mass_swave = calib_params[24];
c4_mass_swave = calib_params[25];
c5_mass_swave = calib_params[26];
c6_mass_swave = calib_params[27];
c7_mass_swave = calib_params[28];
c8_mass_swave = calib_params[29];
c9_mass_swave = calib_params[30];
res_mass = calib_params[31];
if (wide_window == 0) {DCP_prod = -0.0101;}
else {DCP_prod = -0.0072;}
reAj1j2h_temp[0][0][0] = pycuda::real(Aj1j2h(0,0,0));
reAj1j2h_temp[0][1][0] = pycuda::real(Aj1j2h(0,1,0));
reAj1j2h_temp[1][0][0] = pycuda::real(Aj1j2h(1,0,0));
reAj1j2h_temp[0][2][0] = pycuda::real(Aj1j2h(0,2,0));
reAj1j2h_temp[2][0][0] = pycuda::real(Aj1j2h(2,0,0));
reAj1j2h_temp[1][1][0] = pycuda::real(Aj1j2h(1,1,0));
reAj1j2h_temp[1][1][1] = pycuda::real(Aj1j2h(1,1,1));
reAj1j2h_temp[1][1][2] = pycuda::real(Aj1j2h(1,1,2));
reAj1j2h_temp[1][2][0] = pycuda::real(Aj1j2h(1,2,0));
reAj1j2h_temp[1][2][1] = pycuda::real(Aj1j2h(1,2,1));
reAj1j2h_temp[1][2][2] = pycuda::real(Aj1j2h(1,2,2));
reAj1j2h_temp[2][1][0] = pycuda::real(Aj1j2h(2,1,0));
reAj1j2h_temp[2][1][1] = pycuda::real(Aj1j2h(2,1,1));
reAj1j2h_temp[2][1][2] = pycuda::real(Aj1j2h(2,1,2));
reAj1j2h_temp[2][2][0] = pycuda::real(Aj1j2h(2,2,0));
reAj1j2h_temp[2][2][1] = pycuda::real(Aj1j2h(2,2,1));
reAj1j2h_temp[2][2][2] = pycuda::real(Aj1j2h(2,2,2));
reAj1j2h_temp[2][2][3] = pycuda::real(Aj1j2h(2,2,3));
reAj1j2h_temp[2][2][4] = pycuda::real(Aj1j2h(2,2,4));
imAj1j2h_temp[0][0][0] = pycuda::imag(Aj1j2h(0,0,0));
imAj1j2h_temp[0][1][0] = pycuda::imag(Aj1j2h(0,1,0));
imAj1j2h_temp[1][0][0] = pycuda::imag(Aj1j2h(1,0,0));
imAj1j2h_temp[0][2][0] = pycuda::imag(Aj1j2h(0,2,0));
imAj1j2h_temp[2][0][0] = pycuda::imag(Aj1j2h(2,0,0));
imAj1j2h_temp[1][1][0] = pycuda::imag(Aj1j2h(1,1,0));
imAj1j2h_temp[1][1][1] = pycuda::imag(Aj1j2h(1,1,1));
imAj1j2h_temp[1][1][2] = pycuda::imag(Aj1j2h(1,1,2));
imAj1j2h_temp[1][2][0] = pycuda::imag(Aj1j2h(1,2,0));
imAj1j2h_temp[1][2][1] = pycuda::imag(Aj1j2h(1,2,1));
imAj1j2h_temp[1][2][2] = pycuda::imag(Aj1j2h(1,2,2));
imAj1j2h_temp[2][1][0] = pycuda::imag(Aj1j2h(2,1,0));
imAj1j2h_temp[2][1][1] = pycuda::imag(Aj1j2h(2,1,1));
imAj1j2h_temp[2][1][2] = pycuda::imag(Aj1j2h(2,1,2));
imAj1j2h_temp[2][2][0] = pycuda::imag(Aj1j2h(2,2,0));
imAj1j2h_temp[2][2][1] = pycuda::imag(Aj1j2h(2,2,1));
imAj1j2h_temp[2][2][2] = pycuda::imag(Aj1j2h(2,2,2));
imAj1j2h_temp[2][2][3] = pycuda::imag(Aj1j2h(2,2,3));
imAj1j2h_temp[2][2][4] = pycuda::imag(Aj1j2h(2,2,4));
reAbarj1j2h_temp[0][0][0] = pycuda::real(Abarj1j2h(0,0,0));
reAbarj1j2h_temp[0][1][0] = pycuda::real(Abarj1j2h(0,1,0));
reAbarj1j2h_temp[1][0][0] = pycuda::real(Abarj1j2h(1,0,0));
reAbarj1j2h_temp[0][2][0] = pycuda::real(Abarj1j2h(0,2,0));
reAbarj1j2h_temp[2][0][0] = pycuda::real(Abarj1j2h(2,0,0));
reAbarj1j2h_temp[1][1][0] = pycuda::real(Abarj1j2h(1,1,0));
reAbarj1j2h_temp[1][1][1] = pycuda::real(Abarj1j2h(1,1,1));
reAbarj1j2h_temp[1][1][2] = pycuda::real(Abarj1j2h(1,1,2));
reAbarj1j2h_temp[1][2][0] = pycuda::real(Abarj1j2h(1,2,0));
reAbarj1j2h_temp[1][2][1] = pycuda::real(Abarj1j2h(1,2,1));
reAbarj1j2h_temp[1][2][2] = pycuda::real(Abarj1j2h(1,2,2));
reAbarj1j2h_temp[2][1][0] = pycuda::real(Abarj1j2h(2,1,0));
reAbarj1j2h_temp[2][1][1] = pycuda::real(Abarj1j2h(2,1,1));
reAbarj1j2h_temp[2][1][2] = pycuda::real(Abarj1j2h(2,1,2));
reAbarj1j2h_temp[2][2][0] = pycuda::real(Abarj1j2h(2,2,0));
reAbarj1j2h_temp[2][2][1] = pycuda::real(Abarj1j2h(2,2,1));
reAbarj1j2h_temp[2][2][2] = pycuda::real(Abarj1j2h(2,2,2));
reAbarj1j2h_temp[2][2][3] = pycuda::real(Abarj1j2h(2,2,3));
reAbarj1j2h_temp[2][2][4] = pycuda::real(Abarj1j2h(2,2,4));
imAbarj1j2h_temp[0][0][0] = pycuda::imag(Abarj1j2h(0,0,0));
imAbarj1j2h_temp[0][1][0] = pycuda::imag(Abarj1j2h(0,1,0));
imAbarj1j2h_temp[1][0][0] = pycuda::imag(Abarj1j2h(1,0,0));
imAbarj1j2h_temp[0][2][0] = pycuda::imag(Abarj1j2h(0,2,0));
imAbarj1j2h_temp[2][0][0] = pycuda::imag(Abarj1j2h(2,0,0));
imAbarj1j2h_temp[1][1][0] = pycuda::imag(Abarj1j2h(1,1,0));
imAbarj1j2h_temp[1][1][1] = pycuda::imag(Abarj1j2h(1,1,1));
imAbarj1j2h_temp[1][1][2] = pycuda::imag(Abarj1j2h(1,1,2));
imAbarj1j2h_temp[1][2][0] = pycuda::imag(Abarj1j2h(1,2,0));
imAbarj1j2h_temp[1][2][1] = pycuda::imag(Abarj1j2h(1,2,1));
imAbarj1j2h_temp[1][2][2] = pycuda::imag(Abarj1j2h(1,2,2));
imAbarj1j2h_temp[2][1][0] = pycuda::imag(Abarj1j2h(2,1,0));
imAbarj1j2h_temp[2][1][1] = pycuda::imag(Abarj1j2h(2,1,1));
imAbarj1j2h_temp[2][1][2] = pycuda::imag(Abarj1j2h(2,1,2));
imAbarj1j2h_temp[2][2][0] = pycuda::imag(Abarj1j2h(2,2,0));
imAbarj1j2h_temp[2][2][1] = pycuda::imag(Abarj1j2h(2,2,1));
imAbarj1j2h_temp[2][2][2] = pycuda::imag(Abarj1j2h(2,2,2));
imAbarj1j2h_temp[2][2][3] = pycuda::imag(Abarj1j2h(2,2,3));
imAbarj1j2h_temp[2][2][4] = pycuda::imag(Abarj1j2h(2,2,4));
}
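// set_buffer_differential_terms caches, per event, everything that depends only on the
// observables: the resolution-convolved time functions (T_cosh/T_sinh/T_cos/T_sin), the
// tagging factors zeta and DCP_tzero, the angular basis values and the mass line shapes,
// so that the amplitude sums in num_fit/den_fit only combine precomputed numbers.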
__device__ void set_buffer_differential_terms(double *mass_integrals, int icat, int iev) {
Im00 = mass_integrals[0];
Im01 = mass_integrals[1];
Im10 = mass_integrals[2];
Im02 = mass_integrals[3];
Im20 = mass_integrals[4];
Im11 = mass_integrals[5];
Im12 = mass_integrals[6];
Im21 = mass_integrals[7];
Im22 = mass_integrals[8];
double f1,f2,s1,s2,x1,x2;
if (acctype == 3) {
f1 = 1.;
f2 = 0.;
s1 = p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12);
//s1 = (p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12))*1.1779041429731925;
//s1 = (p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12))*t_err[icat][iev];
s2 = 1.;
x1 = t[icat][iev]/(sqrt(2.)*s1);
x2 = t[icat][iev]/(sqrt(2.)*s2);
}
else {
f1 = 1.;
f2 = 0.;
if (year_opt[icat] == 0) {s1 = p0_tres_11+p1_tres_11*(t_err[icat][iev]-deltatmean_tres_11);}
else {s1 = p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12);}
//if (year_opt == 0) {s1 = (p0_tres_11+p1_tres_11*(t_err[icat][iev]-deltatmean_tres_11))*1.1779041429731925;}
//else {s1 = (p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12))*1.1779041429731925;}
//if (year_opt == 0) {s1 = (p0_tres_11+p1_tres_11*(t_err[icat][iev]-deltatmean_tres_11))*t_err[icat][iev];}
//else {s1 = (p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12))*t_err[icat][iev];}
s2 = 1.;
x1 = t[icat][iev]/(sqrt(2.)*s1);
x2 = t[icat][iev]/(sqrt(2.)*s2);
}
pycuda::complex<double> z1_hyper_plus = s1/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq-0.5*delta_gamma_freq,0.);
pycuda::complex<double> z2_hyper_plus = s2/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq-0.5*delta_gamma_freq,0.);
pycuda::complex<double> z1_hyper_minus = s1/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq+0.5*delta_gamma_freq,0.);
pycuda::complex<double> z2_hyper_minus = s2/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq+0.5*delta_gamma_freq,0.);
pycuda::complex<double> z1_trigo = s1/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq,-delta_m_freq);
pycuda::complex<double> z2_trigo = s2/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq,-delta_m_freq);
double conv_exp_hyper_plus = pycuda::real(f1*conv_exp(x1,z1_hyper_plus)+f2*conv_exp(x2,z2_hyper_plus));
double conv_exp_hyper_minus = pycuda::real(f1*conv_exp(x1,z1_hyper_minus)+f2*conv_exp(x2,z2_hyper_minus));
pycuda::complex<double> conv_exp_trigo = f1*conv_exp(x1,z1_trigo)+f2*conv_exp(x2,z2_trigo);
T_cosh_temp[icat][iev] = 0.5*(conv_exp_hyper_plus + conv_exp_hyper_minus);
T_sinh_temp[icat][iev] = 0.5*(conv_exp_hyper_plus - conv_exp_hyper_minus);
T_cos_temp[icat][iev] = pycuda::real(conv_exp_trigo);
T_sin_temp[icat][iev] = pycuda::imag(conv_exp_trigo);
zeta_temp[icat][iev] = zeta(decision_SSK[icat][iev],decision_OS[icat][iev],etamistag_SSK[icat][iev],etamistag_OS[icat][iev]);
DCP_tzero_temp[icat][iev] = DCP_tzero(decision_SSK[icat][iev],decision_OS[icat][iev],etamistag_SSK[icat][iev],etamistag_OS[icat][iev]);
for (int i=0; i<18; ++i) {fi_cos1_temp[i][icat][iev] = fi(cos1[icat][iev],i+1);}
for (int i=0; i<18; ++i) {fi_cos2_temp[i][icat][iev] = fi(cos2[icat][iev],i+1);}
for (int i=0; i<15; ++i) {gi_temp[i][icat][iev] = gi(phi[icat][iev],i+1);}
for (int j1=0; j1<3; ++j1) {
for (int j2=0; j2<3; ++j2) {
pycuda::complex<double> M_temp = Mj1j2(m1[icat][iev],m2[icat][iev],j1,j2);
reMj1j2_temp[j1][j2][icat][iev] = pycuda::real(M_temp);
imMj1j2_temp[j1][j2][icat][iev] = pycuda::imag(M_temp);
}
}
phasespace_temp[icat][iev] = phasespace(m1[icat][iev],m2[icat][iev]);
}
__device__ double Factorial(int n) {
if(n <= 0) return 1.;
double x = 1;
int b = 0;
do {
b++;
x *= b;
} while(b!=n);
return x;
}
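// set_buffer_integral_terms evaluates the decay-time normalisation integrals for this
// event's resolution: the spline acceptance coefficients (the *_gen tables when acctype
// is 3) are expanded binomially with the Factorial helper and combined with the Mn/Kn
// building blocks between consecutive knots; acctype 0 corresponds to a flat acceptance
// integrated from 0 to 12 in the decay-time units used here.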
__device__ void set_buffer_integral_terms(int icat, int iev) {
double s1_deltat;
if (acctype == 3) {
s1_deltat = p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12);
//s1_deltat = (p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12))*1.1779041429731925;
//s1_deltat = (p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12))*t_err[icat][iev];
for (int i=0; i<6; ++i) {spl_knot_vector[year_opt[icat]][trig_opt[icat]][i] = knot_gen(wide_window,i);}
}
else {
if (year_opt[icat] == 0) {s1_deltat = p0_tres_11+p1_tres_11*(t_err[icat][iev]-deltatmean_tres_11);}
else {s1_deltat = p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12);}
//if (year_opt == 0) {s1_deltat = (p0_tres_11+p1_tres_11*(t_err[icat][iev]-deltatmean_tres_11))*1.1779041429731925;}
//else {s1_deltat = (p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12))*1.1779041429731925;}
//if (year_opt == 0) {s1_deltat = (p0_tres_11+p1_tres_11*(t_err[icat][iev]-deltatmean_tres_11))*t_err[icat][iev];}
//else {s1_deltat = (p0_tres_12+p1_tres_12*(t_err[icat][iev]-deltatmean_tres_12))*t_err[icat][iev];}
for (int i=0; i<6; ++i) {spl_knot_vector[year_opt[icat]][trig_opt[icat]][i] = spline_knot(wide_window,i);}
}
pycuda::complex<double> z1_hyper_plus_deltat = s1_deltat/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq-0.5*delta_gamma_freq,0.);
pycuda::complex<double> z1_hyper_minus_deltat = s1_deltat/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq+0.5*delta_gamma_freq,0.);
pycuda::complex<double> z1_trigo_deltat = s1_deltat/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq,-delta_m_freq);
double spl_knot_x1_vector_deltat[6];
double spl_coef_array_deltat[5][4][4][4];
if (acctype == 1 or acctype == 2) {
spl_knot_x1_vector_deltat[0] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][0]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[1] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][1]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[2] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][2]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[3] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][3]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[4] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][4]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[5] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][5]/(sqrt(2.)*s1_deltat);
for (int ibin=0; ibin<5; ++ibin) {
for (int k=0; k<4; ++k) {
for (int i=0; i<(k+1); ++i) {
for (int j=0; j<(i+1); ++j) {
spl_coef_array_deltat[ibin][k][i][j] = spline_coef(year_opt[icat],trig_opt[icat],wide_window,ibin,k)*Factorial(k)/Factorial(k-i)/Factorial(i-j)/Factorial(j)*pow(s1_deltat/sqrt(2.),i+1)*pow(0.,k-i);
}
}
}
}
}
else if (acctype == 3) {
spl_knot_x1_vector_deltat[0] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][0]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[1] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][1]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[2] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][2]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[3] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][3]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[4] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][4]/(sqrt(2.)*s1_deltat);
spl_knot_x1_vector_deltat[5] = spl_knot_vector[year_opt[icat]][trig_opt[icat]][5]/(sqrt(2.)*s1_deltat);
for (int ibin=0; ibin<5; ++ibin) {
for (int k=0; k<4; ++k) {
for (int i=0; i<(k+1); ++i) {
for (int j=0; j<(i+1); ++j) {
spl_coef_array_deltat[ibin][k][i][j] = coef_gen(wide_window,ibin,k)*Factorial(k)/Factorial(k-i)/Factorial(i-j)/Factorial(j)*pow(s1_deltat/sqrt(2.),i+1)*pow(0.,k-i);
}
}
}
}
}
double integral_conv_exp_hyper_plus_deltat = 0;
double integral_conv_exp_hyper_minus_deltat = 0;
pycuda::complex<double> integral_conv_exp_trigo_deltat = pycuda::complex<double>(0.,0.);
if (acctype == 0) {
integral_conv_exp_hyper_plus_deltat += pycuda::real(s1_deltat/sqrt(2.)*Mn(0.,12./(sqrt(2.)*s1_deltat),z1_hyper_plus_deltat,0)*Kn(z1_hyper_plus_deltat,0));
integral_conv_exp_hyper_minus_deltat += pycuda::real(s1_deltat/sqrt(2.)*Mn(0.,12./(sqrt(2.)*s1_deltat),z1_hyper_minus_deltat,0)*Kn(z1_hyper_minus_deltat,0));
integral_conv_exp_trigo_deltat += s1_deltat/sqrt(2.)*Mn(0.,12./(sqrt(2.)*s1_deltat),z1_trigo_deltat,0)*Kn(z1_trigo_deltat,0);
}
else {
for (int ibin=0; ibin<5; ++ibin) {
for (int k=0; k<4; ++k) {
for (int i=0; i<(k+1); ++i) {
for (int j=0; j<(i+1); ++j) {
integral_conv_exp_hyper_plus_deltat += pycuda::real(spl_coef_array_deltat[ibin][k][i][j]*Mn(spl_knot_x1_vector_deltat[ibin],spl_knot_x1_vector_deltat[ibin+1],z1_hyper_plus_deltat,i-j)*Kn(z1_hyper_plus_deltat,j));
integral_conv_exp_hyper_minus_deltat += pycuda::real(spl_coef_array_deltat[ibin][k][i][j]*Mn(spl_knot_x1_vector_deltat[ibin],spl_knot_x1_vector_deltat[ibin+1],z1_hyper_minus_deltat,i-j)*Kn(z1_hyper_minus_deltat,j));
integral_conv_exp_trigo_deltat += spl_coef_array_deltat[ibin][k][i][j]*Mn(spl_knot_x1_vector_deltat[ibin],spl_knot_x1_vector_deltat[ibin+1],z1_trigo_deltat,i-j)*Kn(z1_trigo_deltat,j);
}
}
}
}
}
IT_cosh_temp_deltat[icat][iev] = 0.5*(integral_conv_exp_hyper_plus_deltat + integral_conv_exp_hyper_minus_deltat);
IT_sinh_temp_deltat[icat][iev] = 0.5*(integral_conv_exp_hyper_plus_deltat - integral_conv_exp_hyper_minus_deltat);
IT_cos_temp_deltat[icat][iev] = pycuda::real(integral_conv_exp_trigo_deltat);
IT_sin_temp_deltat[icat][iev] = pycuda::imag(integral_conv_exp_trigo_deltat);
}
__global__ void evaluate(double *data, double *out, double *check, double *options, double *re_amps, double *dirCP_asyms, double *im_amps, double *weak_phases, double *mixing_params, double *calib_params, double *mass_integrals, int Nevt) {
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= Nevt) { return;}
int cat_index;
if (options[1]==0) {
if (options[2]==0) {cat_index = 0;}
else {cat_index = 1;}
}
else {
if (options[2]==0) {cat_index = 2;}
else {cat_index = 3;}
}
set_buffer_options(options,cat_index);
set_buffer_amplitudes(re_amps,dirCP_asyms,im_amps,weak_phases,mixing_params,calib_params);
int i0 = row*12;
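// Per-event record: 12 doubles per row, unpacked below as
// [0] SSK tag decision, [1] OS tag decision, [2] SSK mistag eta, [3] OS mistag eta,
// [4] m(Kpi)_1, [5] m(Kpi)_2, [6] cos(theta_1), [7] cos(theta_2), [8] phi,
// [9] decay time, [10] per-event decay-time error, [11] event weight.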
decision_SSK[cat_index][row] = (int) data[0 + i0];
decision_OS[cat_index][row] = (int) data[1 + i0];
etamistag_SSK[cat_index][row] = data[2 + i0];
etamistag_OS[cat_index][row] = data[3 + i0];
m1[cat_index][row] = data[4 + i0];
m2[cat_index][row] = data[5 + i0];
cos1[cat_index][row] = data[6 + i0];
cos2[cat_index][row] = data[7 + i0];
phi[cat_index][row] = data[8 + i0];
t[cat_index][row] = data[9 + i0];
t_err[cat_index][row] = data[10 + i0];
double ev_weight = data[11 + i0];
double xm1 = (m1[cat_index][row]-1175.)/425.;
double modulus1 = 1.+c1_mass_swave*xm1+c2_mass_swave*(2.*xm1*xm1-1.)+c3_mass_swave*(4.*xm1*xm1*xm1-3.*xm1)+c4_mass_swave*(8.*xm1*xm1*xm1*xm1-8.*xm1*xm1+1.);
double xm2 = (m2[cat_index][row]-1175.)/425.;
double modulus2 = 1.+c1_mass_swave*xm2+c2_mass_swave*(2.*xm2*xm2-1.)+c3_mass_swave*(4.*xm2*xm2*xm2-3.*xm2)+c4_mass_swave*(8.*xm2*xm2*xm2*xm2-8.*xm2*xm2+1.);
if (modulus1<0 or modulus2<0) {out[row] = -10000000000;}
else {
set_buffer_differential_terms(mass_integrals,cat_index,row);
set_buffer_integral_terms(cat_index,row);
double num_fit_temp = num_fit(cat_index,row);
double den_fit_temp = den_fit(cat_index,row);
if (num_fit_temp/den_fit_temp<=0) {out[row] = -10000000000;}
else {out[row] = ev_weight*(log(num_fit_temp/den_fit_temp));}
}
/*set_buffer_differential_terms(mass_integrals,cat_index,row);
set_buffer_integral_terms(cat_index,row);
double num_fit_temp = num_fit(cat_index,row);
double den_fit_temp = den_fit(cat_index,row);
if (num_fit_temp/den_fit_temp<=0) {out[row] = -10000000000;}
else {out[row] = ev_weight*(log(num_fit_temp/den_fit_temp));}*/
}
// ##########################################
// Event generator
__device__ double Ifi(int i) {
if (i == 1) {return If1;}
else if (i == 2) {return If2;}
else if (i == 3) {return If3;}
else if (i == 4) {return If4;}
else if (i == 5) {return If5;}
else if (i == 6) {return If6;}
else if (i == 7) {return If7;}
else if (i == 8) {return If8;}
else if (i == 9) {return If9;}
else if (i == 10) {return If10;}
else if (i == 11) {return If11;}
else if (i == 12) {return If12;}
else if (i == 13) {return If13;}
else if (i == 14) {return If14;}
else if (i == 15) {return If15;}
else if (i == 16) {return If16;}
else if (i == 17) {return If17;}
else if (i == 18) {return If18;}
return 0.;
}
__device__ double Igi(int i) {
if (i == 1) {return 2.*pi;}
else if (i == 2) {return 0.;}
else if (i == 3) {return 0.;}
else if (i == 4) {return pi;}
else if (i == 5) {return 0.;}
else if (i == 6) {return pi;}
else if (i == 7) {return 0.;}
else if (i == 8) {return 0.;}
else if (i == 9) {return 0.;}
else if (i == 10) {return 0.;}
else if (i == 11) {return 0.;}
else if (i == 12) {return 0.;}
else if (i == 13) {return pi;}
else if (i == 14) {return 0.;}
else if (i == 15) {return pi;}
return 0.;
}
__device__ double Ifjjphhp(int j, int jp, int h, int hp) {
return Ifi(fjjphhpindexdict[j][jp][h][hp]);
}
__device__ double Ighhp(int h, int hp) {
return Igi(ghhpindexdict[h][hp]);
}
__device__ double Gaus(double x, double mean, double sigma, bool norm = 0) {
if (sigma == 0) return 1.e30;
double arg = (x-mean)/sigma;
double res = exp(-0.5*arg*arg);
if (!norm) return res;
return res/(2.50662827463100024*sigma);
}
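// ran_gamma draws from a Gamma(a,b) distribution with the Marsaglia-Tsang squeeze method
// (GSL-style; the a < 1 case is boosted to a+1 and rescaled by u^(1/a)). Note that the
// curandState is passed by value, so the caller's RNG state is not advanced by these draws.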
__device__ double ran_gamma(curandState localState, double a, double b) {
if (a < 1){
double u = curand_uniform_double(&localState);
return ran_gamma (localState, 1.0 + a, b) * pow (u, 1.0 / a);
}
{
double x, v, u;
double d = a - 1.0 / 3.0;
double c = (1.0 / 3.0) / sqrt (d);
while (1){
do{
x = curand_normal_double(&localState);
v = 1.0 + c * x;
}
while (v <= 0);
v = v * v * v;
u = curand_uniform_double(&localState);
if (u < 1 - 0.0331 * x * x * x * x)
break;
if (log (u) < 0.5 * x * x + d * (1 - v + log (v)))
break;
}
return b * d * v;
}
}
__device__ double ran_P_deltat(curandState localState) {
double result;
while (1) {
result = ran_gamma(localState,gamma1_dt,beta1_dt);
if (result>=0. and result<=0.1) {break;}
}
return result;
}
__device__ double P_eta_SSK(double eta) {
if (eta < 0.5) {return c_SSK*Gaus(eta,mu1_SSK,sigma1_SSK)+(1.-c_SSK)*Gaus(eta,mu2_SSK,sigma2_SSK);}
else {return 0.;}
}
__device__ double P_eta_OS(double eta) {
if (eta < 0.5) {return c_OS*Gaus(eta,mu1_OS,sigma1_OS)+(1.-c_OS)*Gaus(eta,mu2_OS,sigma2_OS);}
else {return 0.;}
}
__global__ void get_max_P_eta_SSK(double *out) {
double funmax = 0;
double etavar;
double funvar;
for (int i=0; i<10000; i++) {
etavar = i/20000.;
funvar = P_eta_SSK(etavar);
if (funvar > funmax) {funmax = funvar;}
}
out[0] = 1.1*funmax;
return;
}
__global__ void get_max_P_eta_OS(double *out) {
double funmax = 0;
double etavar;
double funvar;
for (int i=0; i<10000; i++) {
etavar = i/20000.;
funvar = P_eta_OS(etavar);
if (funvar > funmax) {funmax = funvar;}
}
out[0] = 1.1*funmax;
return;
}
__global__ void set_generator(double *options, double *re_amps, double *dirCP_asyms, double *im_amps, double *weak_phases, double *mixing_params, double *calib_params, double *cond_distr_params, double *mass_integrals, double *ang_integrals) {
set_buffer_options(options,0);
year_opt[0] = 0;
trig_opt[0] = 0;
set_buffer_amplitudes(re_amps,dirCP_asyms,im_amps,weak_phases,mixing_params,calib_params);
tag_eff_SSK = cond_distr_params[0];
mu1_SSK = cond_distr_params[1];
sigma1_SSK = cond_distr_params[2];
c_SSK = cond_distr_params[3];
mu2_SSK = cond_distr_params[4];
sigma2_SSK = cond_distr_params[5];
tag_eff_OS = cond_distr_params[6];
mu1_OS = cond_distr_params[7];
sigma1_OS = cond_distr_params[8];
c_OS = cond_distr_params[9];
mu2_OS = cond_distr_params[10];
sigma2_OS = cond_distr_params[11];
gamma1_dt = cond_distr_params[12];
beta1_dt = cond_distr_params[13];
c_dt = cond_distr_params[14];
gamma2_dt = cond_distr_params[15];
beta2_dt = cond_distr_params[16];
Im00 = mass_integrals[0];
Im01 = mass_integrals[1];
Im10 = mass_integrals[2];
Im02 = mass_integrals[3];
Im20 = mass_integrals[4];
Im11 = mass_integrals[5];
Im12 = mass_integrals[6];
Im21 = mass_integrals[7];
Im22 = mass_integrals[8];
Ih1Re = mass_integrals[9];
Ih2Re = mass_integrals[10];
Ih3Re = mass_integrals[11];
Ih4Re = mass_integrals[12];
Ih5Re = mass_integrals[13];
Ih6Re = mass_integrals[14];
Ih7Re = mass_integrals[15];
Ih8Re = mass_integrals[16];
Ih9Re = mass_integrals[17];
Ih10Re = mass_integrals[18];
Ih11Re = mass_integrals[19];
Ih12Re = mass_integrals[20];
Ih13Re = mass_integrals[21];
Ih14Re = mass_integrals[22];
Ih15Re = mass_integrals[23];
Ih16Re = mass_integrals[24];
Ih17Re = mass_integrals[25];
Ih18Re = mass_integrals[26];
Ih19Re = mass_integrals[27];
Ih20Re = mass_integrals[28];
Ih21Re = mass_integrals[29];
Ih22Re = mass_integrals[30];
Ih23Re = mass_integrals[31];
Ih24Re = mass_integrals[32];
Ih25Re = mass_integrals[33];
Ih26Re = mass_integrals[34];
Ih27Re = mass_integrals[35];
Ih28Re = mass_integrals[36];
Ih29Re = mass_integrals[37];
Ih30Re = mass_integrals[38];
Ih1Im = mass_integrals[39];
Ih2Im = mass_integrals[40];
Ih3Im = mass_integrals[41];
Ih4Im = mass_integrals[42];
Ih5Im = mass_integrals[43];
Ih6Im = mass_integrals[44];
Ih7Im = mass_integrals[45];
Ih8Im = mass_integrals[46];
Ih9Im = mass_integrals[47];
Ih10Im = mass_integrals[48];
Ih11Im = mass_integrals[49];
Ih12Im = mass_integrals[50];
Ih13Im = mass_integrals[51];
Ih14Im = mass_integrals[52];
Ih15Im = mass_integrals[53];
Ih16Im = mass_integrals[54];
Ih17Im = mass_integrals[55];
Ih18Im = mass_integrals[56];
Ih19Im = mass_integrals[57];
Ih20Im = mass_integrals[58];
Ih21Im = mass_integrals[59];
If1 = ang_integrals[0];
If2 = ang_integrals[1];
If3 = ang_integrals[2];
If4 = ang_integrals[3];
If5 = ang_integrals[4];
If6 = ang_integrals[5];
If7 = ang_integrals[6];
If8 = ang_integrals[7];
If9 = ang_integrals[8];
If10 = ang_integrals[9];
If11 = ang_integrals[10];
If12 = ang_integrals[11];
If13 = ang_integrals[12];
If14 = ang_integrals[13];
If15 = ang_integrals[14];
If16 = ang_integrals[15];
If17 = ang_integrals[16];
If18 = ang_integrals[17];
reIhj1j2j1pj2pdict[0][0][0][0] = pycuda::real(pycuda::complex<double>(Ih22Re,0.));
reIhj1j2j1pj2pdict[0][0][0][1] = pycuda::real(pycuda::complex<double>(Ih1Re,Ih1Im));
reIhj1j2j1pj2pdict[0][0][0][2] = pycuda::real(pycuda::complex<double>(Ih2Re,Ih2Im));
reIhj1j2j1pj2pdict[0][0][1][0] = pycuda::real(pycuda::complex<double>(Ih1Re,Ih1Im));
reIhj1j2j1pj2pdict[0][0][1][1] = pycuda::real(pycuda::complex<double>(Ih3Re,Ih3Im));
reIhj1j2j1pj2pdict[0][0][1][2] = pycuda::real(pycuda::complex<double>(Ih4Re,Ih4Im));
reIhj1j2j1pj2pdict[0][0][2][0] = pycuda::real(pycuda::complex<double>(Ih2Re,Ih2Im));
reIhj1j2j1pj2pdict[0][0][2][1] = pycuda::real(pycuda::complex<double>(Ih4Re,Ih4Im));
reIhj1j2j1pj2pdict[0][0][2][2] = pycuda::real(pycuda::complex<double>(Ih5Re,Ih5Im));
reIhj1j2j1pj2pdict[0][1][0][0] = pycuda::real(pycuda::complex<double>(Ih1Re,-Ih1Im));
reIhj1j2j1pj2pdict[0][1][0][1] = pycuda::real(pycuda::complex<double>(Ih23Re,0.));
reIhj1j2j1pj2pdict[0][1][0][2] = pycuda::real(pycuda::complex<double>(Ih6Re,Ih6Im));
reIhj1j2j1pj2pdict[0][1][1][0] = pycuda::real(pycuda::complex<double>(Ih7Re,Ih7Im));
reIhj1j2j1pj2pdict[0][1][1][1] = pycuda::real(pycuda::complex<double>(Ih8Re,Ih8Im));
reIhj1j2j1pj2pdict[0][1][1][2] = pycuda::real(pycuda::complex<double>(Ih9Re,Ih9Im));
reIhj1j2j1pj2pdict[0][1][2][0] = pycuda::real(pycuda::complex<double>(Ih10Re,Ih10Im));
reIhj1j2j1pj2pdict[0][1][2][1] = pycuda::real(pycuda::complex<double>(Ih11Re,Ih11Im));
reIhj1j2j1pj2pdict[0][1][2][2] = pycuda::real(pycuda::complex<double>(Ih12Re,Ih12Im));
reIhj1j2j1pj2pdict[0][2][0][0] = pycuda::real(pycuda::complex<double>(Ih2Re,-Ih2Im));
reIhj1j2j1pj2pdict[0][2][0][1] = pycuda::real(pycuda::complex<double>(Ih6Re,-Ih6Im));
reIhj1j2j1pj2pdict[0][2][0][2] = pycuda::real(pycuda::complex<double>(Ih25Re,0.));
reIhj1j2j1pj2pdict[0][2][1][0] = pycuda::real(pycuda::complex<double>(Ih10Re,-Ih10Im));
reIhj1j2j1pj2pdict[0][2][1][1] = pycuda::real(pycuda::complex<double>(Ih13Re,Ih13Im));
reIhj1j2j1pj2pdict[0][2][1][2] = pycuda::real(pycuda::complex<double>(Ih14Re,Ih14Im));
reIhj1j2j1pj2pdict[0][2][2][0] = pycuda::real(pycuda::complex<double>(Ih15Re,Ih15Im));
reIhj1j2j1pj2pdict[0][2][2][1] = pycuda::real(pycuda::complex<double>(Ih16Re,Ih16Im));
reIhj1j2j1pj2pdict[0][2][2][2] = pycuda::real(pycuda::complex<double>(Ih17Re,Ih17Im));
reIhj1j2j1pj2pdict[1][0][0][0] = pycuda::real(pycuda::complex<double>(Ih1Re,-Ih1Im));
reIhj1j2j1pj2pdict[1][0][0][1] = pycuda::real(pycuda::complex<double>(Ih7Re,Ih7Im));
reIhj1j2j1pj2pdict[1][0][0][2] = pycuda::real(pycuda::complex<double>(Ih10Re,Ih10Im));
reIhj1j2j1pj2pdict[1][0][1][0] = pycuda::real(pycuda::complex<double>(Ih24Re,0.));
reIhj1j2j1pj2pdict[1][0][1][1] = pycuda::real(pycuda::complex<double>(Ih8Re,Ih8Im));
reIhj1j2j1pj2pdict[1][0][1][2] = pycuda::real(pycuda::complex<double>(Ih11Re,Ih11Im));
reIhj1j2j1pj2pdict[1][0][2][0] = pycuda::real(pycuda::complex<double>(Ih6Re,Ih6Im));
reIhj1j2j1pj2pdict[1][0][2][1] = pycuda::real(pycuda::complex<double>(Ih9Re,Ih9Im));
reIhj1j2j1pj2pdict[1][0][2][2] = pycuda::real(pycuda::complex<double>(Ih12Re,Ih12Im));
reIhj1j2j1pj2pdict[1][1][0][0] = pycuda::real(pycuda::complex<double>(Ih3Re,-Ih3Im));
reIhj1j2j1pj2pdict[1][1][0][1] = pycuda::real(pycuda::complex<double>(Ih8Re,-Ih8Im));
reIhj1j2j1pj2pdict[1][1][0][2] = pycuda::real(pycuda::complex<double>(Ih13Re,-Ih13Im));
reIhj1j2j1pj2pdict[1][1][1][0] = pycuda::real(pycuda::complex<double>(Ih8Re,-Ih8Im));
reIhj1j2j1pj2pdict[1][1][1][1] = pycuda::real(pycuda::complex<double>(Ih27Re,0.));
reIhj1j2j1pj2pdict[1][1][1][2] = pycuda::real(pycuda::complex<double>(Ih18Re,Ih18Im));
reIhj1j2j1pj2pdict[1][1][2][0] = pycuda::real(pycuda::complex<double>(Ih13Re,-Ih13Im));
reIhj1j2j1pj2pdict[1][1][2][1] = pycuda::real(pycuda::complex<double>(Ih18Re,Ih18Im));
reIhj1j2j1pj2pdict[1][1][2][2] = pycuda::real(pycuda::complex<double>(Ih19Re,Ih19Im));
reIhj1j2j1pj2pdict[1][2][0][0] = pycuda::real(pycuda::complex<double>(Ih4Re,-Ih4Im));
reIhj1j2j1pj2pdict[1][2][0][1] = pycuda::real(pycuda::complex<double>(Ih9Re,-Ih9Im));
reIhj1j2j1pj2pdict[1][2][0][2] = pycuda::real(pycuda::complex<double>(Ih14Re,-Ih14Im));
reIhj1j2j1pj2pdict[1][2][1][0] = pycuda::real(pycuda::complex<double>(Ih11Re,-Ih11Im));
reIhj1j2j1pj2pdict[1][2][1][1] = pycuda::real(pycuda::complex<double>(Ih18Re,-Ih18Im));
reIhj1j2j1pj2pdict[1][2][1][2] = pycuda::real(pycuda::complex<double>(Ih28Re,0.));
reIhj1j2j1pj2pdict[1][2][2][0] = pycuda::real(pycuda::complex<double>(Ih16Re,-Ih16Im));
reIhj1j2j1pj2pdict[1][2][2][1] = pycuda::real(pycuda::complex<double>(Ih20Re,Ih20Im));
reIhj1j2j1pj2pdict[1][2][2][2] = pycuda::real(pycuda::complex<double>(Ih21Re,Ih21Im));
reIhj1j2j1pj2pdict[2][0][0][0] = pycuda::real(pycuda::complex<double>(Ih2Re,-Ih2Im));
reIhj1j2j1pj2pdict[2][0][0][1] = pycuda::real(pycuda::complex<double>(Ih10Re,-Ih10Im));
reIhj1j2j1pj2pdict[2][0][0][2] = pycuda::real(pycuda::complex<double>(Ih15Re,Ih15Im));
reIhj1j2j1pj2pdict[2][0][1][0] = pycuda::real(pycuda::complex<double>(Ih6Re,-Ih6Im));
reIhj1j2j1pj2pdict[2][0][1][1] = pycuda::real(pycuda::complex<double>(Ih13Re,Ih13Im));
reIhj1j2j1pj2pdict[2][0][1][2] = pycuda::real(pycuda::complex<double>(Ih16Re,Ih16Im));
reIhj1j2j1pj2pdict[2][0][2][0] = pycuda::real(pycuda::complex<double>(Ih26Re,0.));
reIhj1j2j1pj2pdict[2][0][2][1] = pycuda::real(pycuda::complex<double>(Ih14Re,Ih14Im));
reIhj1j2j1pj2pdict[2][0][2][2] = pycuda::real(pycuda::complex<double>(Ih17Re,Ih17Im));
reIhj1j2j1pj2pdict[2][1][0][0] = pycuda::real(pycuda::complex<double>(Ih4Re,-Ih4Im));
reIhj1j2j1pj2pdict[2][1][0][1] = pycuda::real(pycuda::complex<double>(Ih11Re,-Ih11Im));
reIhj1j2j1pj2pdict[2][1][0][2] = pycuda::real(pycuda::complex<double>(Ih16Re,-Ih16Im));
reIhj1j2j1pj2pdict[2][1][1][0] = pycuda::real(pycuda::complex<double>(Ih9Re,-Ih9Im));
reIhj1j2j1pj2pdict[2][1][1][1] = pycuda::real(pycuda::complex<double>(Ih18Re,-Ih18Im));
reIhj1j2j1pj2pdict[2][1][1][2] = pycuda::real(pycuda::complex<double>(Ih20Re,Ih20Im));
reIhj1j2j1pj2pdict[2][1][2][0] = pycuda::real(pycuda::complex<double>(Ih14Re,-Ih14Im));
reIhj1j2j1pj2pdict[2][1][2][1] = pycuda::real(pycuda::complex<double>(Ih29Re,0.));
reIhj1j2j1pj2pdict[2][1][2][2] = pycuda::real(pycuda::complex<double>(Ih21Re,Ih21Im));
reIhj1j2j1pj2pdict[2][2][0][0] = pycuda::real(pycuda::complex<double>(Ih5Re,-Ih5Im));
reIhj1j2j1pj2pdict[2][2][0][1] = pycuda::real(pycuda::complex<double>(Ih12Re,-Ih12Im));
reIhj1j2j1pj2pdict[2][2][0][2] = pycuda::real(pycuda::complex<double>(Ih17Re,-Ih17Im));
reIhj1j2j1pj2pdict[2][2][1][0] = pycuda::real(pycuda::complex<double>(Ih12Re,-Ih12Im));
reIhj1j2j1pj2pdict[2][2][1][1] = pycuda::real(pycuda::complex<double>(Ih19Re,-Ih19Im));
reIhj1j2j1pj2pdict[2][2][1][2] = pycuda::real(pycuda::complex<double>(Ih21Re,-Ih21Im));
reIhj1j2j1pj2pdict[2][2][2][0] = pycuda::real(pycuda::complex<double>(Ih17Re,-Ih17Im));
reIhj1j2j1pj2pdict[2][2][2][1] = pycuda::real(pycuda::complex<double>(Ih21Re,-Ih21Im));
reIhj1j2j1pj2pdict[2][2][2][2] = pycuda::real(pycuda::complex<double>(Ih30Re,0.));
imIhj1j2j1pj2pdict[0][0][0][0] = pycuda::imag(pycuda::complex<double>(Ih22Re,0.));
imIhj1j2j1pj2pdict[0][0][0][1] = pycuda::imag(pycuda::complex<double>(Ih1Re,Ih1Im));
imIhj1j2j1pj2pdict[0][0][0][2] = pycuda::imag(pycuda::complex<double>(Ih2Re,Ih2Im));
imIhj1j2j1pj2pdict[0][0][1][0] = pycuda::imag(pycuda::complex<double>(Ih1Re,Ih1Im));
imIhj1j2j1pj2pdict[0][0][1][1] = pycuda::imag(pycuda::complex<double>(Ih3Re,Ih3Im));
imIhj1j2j1pj2pdict[0][0][1][2] = pycuda::imag(pycuda::complex<double>(Ih4Re,Ih4Im));
imIhj1j2j1pj2pdict[0][0][2][0] = pycuda::imag(pycuda::complex<double>(Ih2Re,Ih2Im));
imIhj1j2j1pj2pdict[0][0][2][1] = pycuda::imag(pycuda::complex<double>(Ih4Re,Ih4Im));
imIhj1j2j1pj2pdict[0][0][2][2] = pycuda::imag(pycuda::complex<double>(Ih5Re,Ih5Im));
imIhj1j2j1pj2pdict[0][1][0][0] = pycuda::imag(pycuda::complex<double>(Ih1Re,-Ih1Im));
imIhj1j2j1pj2pdict[0][1][0][1] = pycuda::imag(pycuda::complex<double>(Ih23Re,0.));
imIhj1j2j1pj2pdict[0][1][0][2] = pycuda::imag(pycuda::complex<double>(Ih6Re,Ih6Im));
imIhj1j2j1pj2pdict[0][1][1][0] = pycuda::imag(pycuda::complex<double>(Ih7Re,Ih7Im));
imIhj1j2j1pj2pdict[0][1][1][1] = pycuda::imag(pycuda::complex<double>(Ih8Re,Ih8Im));
imIhj1j2j1pj2pdict[0][1][1][2] = pycuda::imag(pycuda::complex<double>(Ih9Re,Ih9Im));
imIhj1j2j1pj2pdict[0][1][2][0] = pycuda::imag(pycuda::complex<double>(Ih10Re,Ih10Im));
imIhj1j2j1pj2pdict[0][1][2][1] = pycuda::imag(pycuda::complex<double>(Ih11Re,Ih11Im));
imIhj1j2j1pj2pdict[0][1][2][2] = pycuda::imag(pycuda::complex<double>(Ih12Re,Ih12Im));
imIhj1j2j1pj2pdict[0][2][0][0] = pycuda::imag(pycuda::complex<double>(Ih2Re,-Ih2Im));
imIhj1j2j1pj2pdict[0][2][0][1] = pycuda::imag(pycuda::complex<double>(Ih6Re,-Ih6Im));
imIhj1j2j1pj2pdict[0][2][0][2] = pycuda::imag(pycuda::complex<double>(Ih25Re,0.));
imIhj1j2j1pj2pdict[0][2][1][0] = pycuda::imag(pycuda::complex<double>(Ih10Re,-Ih10Im));
imIhj1j2j1pj2pdict[0][2][1][1] = pycuda::imag(pycuda::complex<double>(Ih13Re,Ih13Im));
imIhj1j2j1pj2pdict[0][2][1][2] = pycuda::imag(pycuda::complex<double>(Ih14Re,Ih14Im));
imIhj1j2j1pj2pdict[0][2][2][0] = pycuda::imag(pycuda::complex<double>(Ih15Re,Ih15Im));
imIhj1j2j1pj2pdict[0][2][2][1] = pycuda::imag(pycuda::complex<double>(Ih16Re,Ih16Im));
imIhj1j2j1pj2pdict[0][2][2][2] = pycuda::imag(pycuda::complex<double>(Ih17Re,Ih17Im));
imIhj1j2j1pj2pdict[1][0][0][0] = pycuda::imag(pycuda::complex<double>(Ih1Re,-Ih1Im));
imIhj1j2j1pj2pdict[1][0][0][1] = pycuda::imag(pycuda::complex<double>(Ih7Re,Ih7Im));
imIhj1j2j1pj2pdict[1][0][0][2] = pycuda::imag(pycuda::complex<double>(Ih10Re,Ih10Im));
imIhj1j2j1pj2pdict[1][0][1][0] = pycuda::imag(pycuda::complex<double>(Ih24Re,0.));
imIhj1j2j1pj2pdict[1][0][1][1] = pycuda::imag(pycuda::complex<double>(Ih8Re,Ih8Im));
imIhj1j2j1pj2pdict[1][0][1][2] = pycuda::imag(pycuda::complex<double>(Ih11Re,Ih11Im));
imIhj1j2j1pj2pdict[1][0][2][0] = pycuda::imag(pycuda::complex<double>(Ih6Re,Ih6Im));
imIhj1j2j1pj2pdict[1][0][2][1] = pycuda::imag(pycuda::complex<double>(Ih9Re,Ih9Im));
imIhj1j2j1pj2pdict[1][0][2][2] = pycuda::imag(pycuda::complex<double>(Ih12Re,Ih12Im));
imIhj1j2j1pj2pdict[1][1][0][0] = pycuda::imag(pycuda::complex<double>(Ih3Re,-Ih3Im));
imIhj1j2j1pj2pdict[1][1][0][1] = pycuda::imag(pycuda::complex<double>(Ih8Re,-Ih8Im));
imIhj1j2j1pj2pdict[1][1][0][2] = pycuda::imag(pycuda::complex<double>(Ih13Re,-Ih13Im));
imIhj1j2j1pj2pdict[1][1][1][0] = pycuda::imag(pycuda::complex<double>(Ih8Re,-Ih8Im));
imIhj1j2j1pj2pdict[1][1][1][1] = pycuda::imag(pycuda::complex<double>(Ih27Re,0.));
imIhj1j2j1pj2pdict[1][1][1][2] = pycuda::imag(pycuda::complex<double>(Ih18Re,Ih18Im));
imIhj1j2j1pj2pdict[1][1][2][0] = pycuda::imag(pycuda::complex<double>(Ih13Re,-Ih13Im));
imIhj1j2j1pj2pdict[1][1][2][1] = pycuda::imag(pycuda::complex<double>(Ih18Re,Ih18Im));
imIhj1j2j1pj2pdict[1][1][2][2] = pycuda::imag(pycuda::complex<double>(Ih19Re,Ih19Im));
imIhj1j2j1pj2pdict[1][2][0][0] = pycuda::imag(pycuda::complex<double>(Ih4Re,-Ih4Im));
imIhj1j2j1pj2pdict[1][2][0][1] = pycuda::imag(pycuda::complex<double>(Ih9Re,-Ih9Im));
imIhj1j2j1pj2pdict[1][2][0][2] = pycuda::imag(pycuda::complex<double>(Ih14Re,-Ih14Im));
imIhj1j2j1pj2pdict[1][2][1][0] = pycuda::imag(pycuda::complex<double>(Ih11Re,-Ih11Im));
imIhj1j2j1pj2pdict[1][2][1][1] = pycuda::imag(pycuda::complex<double>(Ih18Re,-Ih18Im));
imIhj1j2j1pj2pdict[1][2][1][2] = pycuda::imag(pycuda::complex<double>(Ih28Re,0.));
imIhj1j2j1pj2pdict[1][2][2][0] = pycuda::imag(pycuda::complex<double>(Ih16Re,-Ih16Im));
imIhj1j2j1pj2pdict[1][2][2][1] = pycuda::imag(pycuda::complex<double>(Ih20Re,Ih20Im));
imIhj1j2j1pj2pdict[1][2][2][2] = pycuda::imag(pycuda::complex<double>(Ih21Re,Ih21Im));
imIhj1j2j1pj2pdict[2][0][0][0] = pycuda::imag(pycuda::complex<double>(Ih2Re,-Ih2Im));
imIhj1j2j1pj2pdict[2][0][0][1] = pycuda::imag(pycuda::complex<double>(Ih10Re,-Ih10Im));
imIhj1j2j1pj2pdict[2][0][0][2] = pycuda::imag(pycuda::complex<double>(Ih15Re,Ih15Im));
imIhj1j2j1pj2pdict[2][0][1][0] = pycuda::imag(pycuda::complex<double>(Ih6Re,-Ih6Im));
imIhj1j2j1pj2pdict[2][0][1][1] = pycuda::imag(pycuda::complex<double>(Ih13Re,Ih13Im));
imIhj1j2j1pj2pdict[2][0][1][2] = pycuda::imag(pycuda::complex<double>(Ih16Re,Ih16Im));
imIhj1j2j1pj2pdict[2][0][2][0] = pycuda::imag(pycuda::complex<double>(Ih26Re,0.));
imIhj1j2j1pj2pdict[2][0][2][1] = pycuda::imag(pycuda::complex<double>(Ih14Re,Ih14Im));
imIhj1j2j1pj2pdict[2][0][2][2] = pycuda::imag(pycuda::complex<double>(Ih17Re,Ih17Im));
imIhj1j2j1pj2pdict[2][1][0][0] = pycuda::imag(pycuda::complex<double>(Ih4Re,-Ih4Im));
imIhj1j2j1pj2pdict[2][1][0][1] = pycuda::imag(pycuda::complex<double>(Ih11Re,-Ih11Im));
imIhj1j2j1pj2pdict[2][1][0][2] = pycuda::imag(pycuda::complex<double>(Ih16Re,-Ih16Im));
imIhj1j2j1pj2pdict[2][1][1][0] = pycuda::imag(pycuda::complex<double>(Ih9Re,-Ih9Im));
imIhj1j2j1pj2pdict[2][1][1][1] = pycuda::imag(pycuda::complex<double>(Ih18Re,-Ih18Im));
imIhj1j2j1pj2pdict[2][1][1][2] = pycuda::imag(pycuda::complex<double>(Ih20Re,Ih20Im));
imIhj1j2j1pj2pdict[2][1][2][0] = pycuda::imag(pycuda::complex<double>(Ih14Re,-Ih14Im));
imIhj1j2j1pj2pdict[2][1][2][1] = pycuda::imag(pycuda::complex<double>(Ih29Re,0.));
imIhj1j2j1pj2pdict[2][1][2][2] = pycuda::imag(pycuda::complex<double>(Ih21Re,Ih21Im));
imIhj1j2j1pj2pdict[2][2][0][0] = pycuda::imag(pycuda::complex<double>(Ih5Re,-Ih5Im));
imIhj1j2j1pj2pdict[2][2][0][1] = pycuda::imag(pycuda::complex<double>(Ih12Re,-Ih12Im));
imIhj1j2j1pj2pdict[2][2][0][2] = pycuda::imag(pycuda::complex<double>(Ih17Re,-Ih17Im));
imIhj1j2j1pj2pdict[2][2][1][0] = pycuda::imag(pycuda::complex<double>(Ih12Re,-Ih12Im));
imIhj1j2j1pj2pdict[2][2][1][1] = pycuda::imag(pycuda::complex<double>(Ih19Re,-Ih19Im));
imIhj1j2j1pj2pdict[2][2][1][2] = pycuda::imag(pycuda::complex<double>(Ih21Re,-Ih21Im));
imIhj1j2j1pj2pdict[2][2][2][0] = pycuda::imag(pycuda::complex<double>(Ih17Re,-Ih17Im));
imIhj1j2j1pj2pdict[2][2][2][1] = pycuda::imag(pycuda::complex<double>(Ih21Re,-Ih21Im));
imIhj1j2j1pj2pdict[2][2][2][2] = pycuda::imag(pycuda::complex<double>(Ih30Re,0.));
}
__device__ void set_buffer_differential_terms_gen(int iev) {
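// Per-event buffers used by the generator: Gaussian decay-time resolution width
// calibrated from the per-event error (only the first component is active, f1 = 1,
// f2 = 0; for acctype == 3 the p0_tres_12/p1_tres_12 calibration is used regardless
// of year_opt), the convolved time functions, tagging factors, and the angular
// (fi, gi) and mass (Mj1j2) terms.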
double f1,f2,s1,s2,x1,x2;
if (acctype == 3) {
f1 = 1.;
f2 = 0.;
s1 = p0_tres_12+p1_tres_12*(t_err[0][iev]-deltatmean_tres_12);
//s1 = (p0_tres_12+p1_tres_12*(t_err[0][iev]-deltatmean_tres_12))*1.1779041429731925;
//s1 = (p0_tres_12+p1_tres_12*(t_err[0][iev]-deltatmean_tres_12))*t_err[0][iev];
s2 = 1.;
x1 = t[0][iev]/(sqrt(2.)*s1);
x2 = t[0][iev]/(sqrt(2.)*s2);
}
else {
f1 = 1.;
f2 = 0.;
if (year_opt == 0) {s1 = p0_tres_11+p1_tres_11*(t_err[0][iev]-deltatmean_tres_11);}
else {s1 = p0_tres_12+p1_tres_12*(t_err[0][iev]-deltatmean_tres_12);}
//if (year_opt == 0) {s1 = (p0_tres_11+p1_tres_11*(t_err[0][iev]-deltatmean_tres_11))*1.1779041429731925;}
//else {s1 = (p0_tres_12+p1_tres_12*(t_err[0][iev]-deltatmean_tres_12))*1.1779041429731925;}
//if (year_opt == 0) {s1 = (p0_tres_11+p1_tres_11*(t_err[0][iev]-deltatmean_tres_11))*t_err[0][iev];}
//else {s1 = (p0_tres_12+p1_tres_12*(t_err[0][iev]-deltatmean_tres_12))*t_err[0][iev];}
s2 = 1.;
x1 = t[0][iev]/(sqrt(2.)*s1);
x2 = t[0][iev]/(sqrt(2.)*s2);
}
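// Complex arguments of conv_exp: s/sqrt(2)*(Gamma -/+ DeltaGamma/2) for the
// hyperbolic terms and s/sqrt(2)*(Gamma - i*Deltam) for the oscillating terms.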
pycuda::complex<double> z1_hyper_plus = s1/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq-0.5*delta_gamma_freq,0.);
pycuda::complex<double> z2_hyper_plus = s2/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq-0.5*delta_gamma_freq,0.);
pycuda::complex<double> z1_hyper_minus = s1/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq+0.5*delta_gamma_freq,0.);
pycuda::complex<double> z2_hyper_minus = s2/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq+0.5*delta_gamma_freq,0.);
pycuda::complex<double> z1_trigo = s1/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq,-delta_m_freq);
pycuda::complex<double> z2_trigo = s2/sqrt(2.)*pycuda::complex<double>(gamma_Bs_freq,-delta_m_freq);
double conv_exp_hyper_plus = pycuda::real(f1*conv_exp(x1,z1_hyper_plus)+f2*conv_exp(x2,z2_hyper_plus));
double conv_exp_hyper_minus = pycuda::real(f1*conv_exp(x1,z1_hyper_minus)+f2*conv_exp(x2,z2_hyper_minus));
pycuda::complex<double> conv_exp_trigo = f1*conv_exp(x1,z1_trigo)+f2*conv_exp(x2,z2_trigo);
T_cosh_temp[0][iev] = 0.5*(conv_exp_hyper_plus + conv_exp_hyper_minus);
T_sinh_temp[0][iev] = 0.5*(conv_exp_hyper_plus - conv_exp_hyper_minus);
T_cos_temp[0][iev] = pycuda::real(conv_exp_trigo);
T_sin_temp[0][iev] = pycuda::imag(conv_exp_trigo);
zeta_temp[0][iev] = zeta(decision_SSK[0][iev],decision_OS[0][iev],etamistag_SSK[0][iev],etamistag_OS[0][iev]);
DCP_tzero_temp[0][iev] = DCP_tzero(decision_SSK[0][iev],decision_OS[0][iev],etamistag_SSK[0][iev],etamistag_OS[0][iev]);
for (int i=0; i<18; ++i) {fi_cos1_temp[i][0][iev] = fi(cos1[0][iev],i+1);}
for (int i=0; i<18; ++i) {fi_cos2_temp[i][0][iev] = fi(cos2[0][iev],i+1);}
for (int i=0; i<15; ++i) {gi_temp[i][0][iev] = gi(phi[0][iev],i+1);}
for (int j1=0; j1<3; ++j1) {
for (int j2=0; j2<3; ++j2) {
pycuda::complex<double> M_temp = Mj1j2(m1[0][iev],m2[0][iev],j1,j2);
reMj1j2_temp[j1][j2][0][iev] = pycuda::real(M_temp);
imMj1j2_temp[j1][j2][0][iev] = pycuda::imag(M_temp);
}
}
phasespace_temp[0][iev] = phasespace(m1[0][iev],m2[0][iev]);
}
__device__ double comp_den_toy(int j1, int j2, int h, int j1p, int j2p, int hp, int iev) {
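// One (j1,j2,h)x(j1',j2',h') pairing of the toy-PDF normalisation: time integral
// times amplitude combination times mass and angular integrals.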
return pycuda::real(ITj1j2hj1pj2php_deltat(j1,j2,h,j1p,j2p,hp,0,iev)*Nj1j2hj1pj2php(j1,j2,h,j1p,j2p,hp)*Ihj1j2j1pj2p(j1,j2,j1p,j2p))*Ighhp(h,hp)*Ifjjphhp(j1,j1p,h,hp)*Ifjjphhp(j2,j2p,h,hp);
}
__device__ double den_toy(int iev) {
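// Toy-PDF normalisation: sum over all independent amplitude pairings; cross terms
// between different waves enter twice (factor 2.).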
return comp_den_toy(0,0,0,0,0,0,iev)+comp_den_toy(0,1,0,0,1,0,iev)+comp_den_toy(0,2,0,0,2,0,iev)+comp_den_toy(1,0,0,1,0,0,iev)+comp_den_toy(1,1,0,1,1,0,iev)
+comp_den_toy(1,1,1,1,1,1,iev)+comp_den_toy(1,1,2,1,1,2,iev)+comp_den_toy(1,2,0,1,2,0,iev)+comp_den_toy(1,2,1,1,2,1,iev)+comp_den_toy(1,2,2,1,2,2,iev)
+comp_den_toy(2,0,0,2,0,0,iev)+comp_den_toy(2,1,0,2,1,0,iev)+comp_den_toy(2,1,1,2,1,1,iev)+comp_den_toy(2,1,2,2,1,2,iev)+comp_den_toy(2,2,0,2,2,0,iev)
+comp_den_toy(2,2,1,2,2,1,iev)+comp_den_toy(2,2,2,2,2,2,iev)+comp_den_toy(2,2,3,2,2,3,iev)+comp_den_toy(2,2,4,2,2,4,iev)+2.*comp_den_toy(0,1,0,0,0,0,iev)
+2.*comp_den_toy(0,1,0,1,0,0,iev)+2.*comp_den_toy(0,1,0,2,0,0,iev)+2.*comp_den_toy(0,2,0,0,0,0,iev)+2.*comp_den_toy(0,2,0,0,1,0,iev)+2.*comp_den_toy(0,2,0,1,0,0,iev)
+2.*comp_den_toy(0,2,0,1,1,0,iev)+2.*comp_den_toy(0,2,0,2,0,0,iev)+2.*comp_den_toy(0,2,0,2,1,0,iev)+2.*comp_den_toy(1,0,0,0,0,0,iev)+2.*comp_den_toy(1,1,0,0,0,0,iev)
+2.*comp_den_toy(1,1,0,0,1,0,iev)+2.*comp_den_toy(1,1,0,1,0,0,iev)+2.*comp_den_toy(1,1,0,2,0,0,iev)+2.*comp_den_toy(1,2,0,0,0,0,iev)+2.*comp_den_toy(1,2,0,0,1,0,iev)
+2.*comp_den_toy(1,2,0,0,2,0,iev)+2.*comp_den_toy(1,2,0,1,0,0,iev)+2.*comp_den_toy(1,2,0,1,1,0,iev)+2.*comp_den_toy(1,2,0,2,0,0,iev)+2.*comp_den_toy(1,2,0,2,1,0,iev)
+2.*comp_den_toy(1,2,1,1,1,1,iev)+2.*comp_den_toy(1,2,1,2,1,1,iev)+2.*comp_den_toy(1,2,2,1,1,2,iev)+2.*comp_den_toy(1,2,2,2,1,2,iev)+2.*comp_den_toy(2,0,0,0,0,0,iev)
+2.*comp_den_toy(2,0,0,1,0,0,iev)+2.*comp_den_toy(2,1,0,0,0,0,iev)+2.*comp_den_toy(2,1,0,0,1,0,iev)+2.*comp_den_toy(2,1,0,1,0,0,iev)+2.*comp_den_toy(2,1,0,1,1,0,iev)
+2.*comp_den_toy(2,1,0,2,0,0,iev)+2.*comp_den_toy(2,1,1,1,1,1,iev)+2.*comp_den_toy(2,1,2,1,1,2,iev)+2.*comp_den_toy(2,2,0,0,0,0,iev)+2.*comp_den_toy(2,2,0,0,1,0,iev)
+2.*comp_den_toy(2,2,0,0,2,0,iev)+2.*comp_den_toy(2,2,0,1,0,0,iev)+2.*comp_den_toy(2,2,0,1,1,0,iev)+2.*comp_den_toy(2,2,0,1,2,0,iev)+2.*comp_den_toy(2,2,0,2,0,0,iev)
+2.*comp_den_toy(2,2,0,2,1,0,iev)+2.*comp_den_toy(2,2,1,1,1,1,iev)+2.*comp_den_toy(2,2,1,1,2,1,iev)+2.*comp_den_toy(2,2,1,2,1,1,iev)+2.*comp_den_toy(2,2,2,1,1,2,iev)
+2.*comp_den_toy(2,2,2,1,2,2,iev)+2.*comp_den_toy(2,2,2,2,1,2,iev);
}
__global__ void evaluate_CondPDF(double m1_ran, double m2_ran, double cos1_ran, double cos2_ran, double phi_ran, double t_ran, double t_err_ran, int q_SSK_ran, int q_OS_ran, double eta_SSK_ran, double eta_OS_ran, double *mixing_params, double *calib_params, double *out) {
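// Single-thread kernel: evaluates the logarithm of the conditional PDF (numerator
// times generator acceptance over den_toy) at one phase-space point for the given
// per-event error, tag decisions and mistags.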
m1[0][0] = m1_ran;
m2[0][0] = m2_ran;
cos1[0][0] = cos1_ran;
cos2[0][0] = cos2_ran;
phi[0][0] = phi_ran;
t[0][0] = t_ran;
t_err[0][0] = t_err_ran;
decision_SSK[0][0] = q_SSK_ran;
decision_OS[0][0] = q_OS_ran;
etamistag_SSK[0][0] = eta_SSK_ran;
etamistag_OS[0][0] = eta_OS_ran;
gamma_Bs_freq = mixing_params[1];
delta_gamma_freq = mixing_params[2];
p0metac_tag_SSK = calib_params[0];
p0metac_tag_OS = calib_params[1];
Dp0half_tag_SSK = calib_params[2];
Dp0half_tag_OS = calib_params[3];
p1_tag_SSK = calib_params[4];
p1_tag_OS = calib_params[5];
Dp1half_tag_SSK = calib_params[6];
Dp1half_tag_OS = calib_params[7];
p0_tres_12 = calib_params[14];
p1_tres_12 = calib_params[15];
c5_mass_swave = calib_params[26];
c6_mass_swave = calib_params[27];
c7_mass_swave = calib_params[28];
c8_mass_swave = calib_params[29];
set_buffer_differential_terms_gen(0);
set_buffer_integral_terms(0,0);
double num_temp = num_fit(0,0)*accGen(t_ran,m1_ran,m2_ran,cos1_ran,cos2_ran,phi_ran);
double den_temp = den_toy(0);
if (num_temp/den_temp<=0) {out[0] = -1.e20;}
else {out[0] = log(num_temp/den_temp);}
}
__global__ void generateEvent(double *gendata, double max_fun_eta_SSK, double max_fun_eta_OS, double max_fun_cond, int Nevt) {
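// Toy generator: one thread per event. The conditional observables (decay-time
// error, mistag probabilities) are drawn first from their own distributions, then
// the decay observables by accept-reject against the envelope max_fun_cond.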
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= Nevt) { return;}
curandState state;
curand_init((unsigned long long)clock(), row, 0, &state);
// Determination of the per event decay time error.
t_err[0][row] = ran_P_deltat(state);
// Determination of the SSK mistag probability.
double dec_SSK_tagged = curand_uniform(&state);
if (dec_SSK_tagged < tag_eff_SSK) {
double etamistag_SSK_ran;
max_fun[row] = max_fun_eta_SSK;
while (1) {
etamistag_SSK_ran = 0.5*curand_uniform(&state);
fun_ran[row] = P_eta_SSK(etamistag_SSK_ran);
dec_accepted[row] = max_fun[row]*curand_uniform(&state);
if (fun_ran[row] > dec_accepted[row]) {break;}
}
etamistag_SSK[0][row] = etamistag_SSK_ran;
}
else {
etamistag_SSK[0][row] = 0.5;
}
// Determination of the OS mistag probability.
double dec_OS_tagged = curand_uniform(&state);
if (dec_OS_tagged < tag_eff_OS) {
double etamistag_OS_ran;
max_fun[row] = max_fun_eta_OS;
while (1) {
etamistag_OS_ran = 0.5*curand_uniform(&state);
fun_ran[row] = P_eta_OS(etamistag_OS_ran);
dec_accepted[row] = max_fun[row]*curand_uniform(&state);
if (fun_ran[row] > dec_accepted[row]) {break;}
}
etamistag_OS[0][row] = etamistag_OS_ran;
}
else {
etamistag_OS[0][row] = 0.5;
}
// Determination of the decay observables.
max_fun[row] = max_fun_cond;
while (1) {
if (wide_window == 1) {
m1[0][row] = 750.+curand_uniform(&state)*(1600.-750.);
m2[0][row] = 750.+curand_uniform(&state)*(1600.-750.);
}
else {
m1[0][row] = 750.+curand_uniform(&state)*(1050.-750.);
m2[0][row] = 750.+curand_uniform(&state)*(1050.-750.);
}
cos1[0][row] = -1.+curand_uniform(&state)*2.;
cos2[0][row] = -1.+curand_uniform(&state)*2.;
phi[0][row] = curand_uniform(&state)*2.*pi;
t[0][row] = curand_uniform(&state)*12.;
if (etamistag_SSK[0][row] == 0.5) {decision_SSK[0][row] = 0;}
else {
double d_SSK = curand_uniform(&state);
if (d_SSK <= 0.5) {decision_SSK[0][row] = -1;}
else {decision_SSK[0][row] = 1;}
}
if (etamistag_OS[0][row] == 0.5) {decision_OS[0][row] = 0;}
else {
double d_OS = curand_uniform(&state);
if (d_OS <= 0.5) {decision_OS[0][row] = -1;}
else {decision_OS[0][row] = 1;}
}
set_buffer_differential_terms_gen(row);
set_buffer_integral_terms(0,row);
dec_accepted[row] = curand_uniform(&state);
fun_ran[row] = num_fit(0,row)/den_toy(row)*accGen(t[0][row],m1[0][row],m2[0][row],cos1[0][row],cos2[0][row],phi[0][row])/max_fun[row];
if (fun_ran[row] > dec_accepted[row]) {break;}
}
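// Store the accepted event as a 12-column record: tag decisions, mistags, masses,
// helicity angles, decay time, its per-event error and a unit weight.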
int i0 = row*12;
gendata[0 + i0] = (double) decision_SSK[0][row];
gendata[1 + i0] = (double) decision_OS[0][row];
gendata[2 + i0] = etamistag_SSK[0][row];
gendata[3 + i0] = etamistag_OS[0][row];
gendata[4 + i0] = m1[0][row];
gendata[5 + i0] = m2[0][row];
gendata[6 + i0] = cos1[0][row];
gendata[7 + i0] = cos2[0][row];
gendata[8 + i0] = phi[0][row];
gendata[9 + i0] = t[0][row];
gendata[10 + i0] = t_err[0][row];
gendata[11 + i0] = 1.;
return;
}
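// Launch sketch (assumption, host-side choice): one thread per generated event,
// e.g. block size 256 and grid size ceil(Nevt/256); gendata must provide storage
// for 12*Nevt doubles.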
__global__ void evaluate_toy(double *data, double *out, double *re_amps, double *dirCP_asyms, double *im_amps, double *weak_phases, double *mixing_params, double *calib_params, int Nevt) {
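// Per-event log-likelihood on a toy sample: unpack the 12-column record written by
// generateEvent and return log(num_fit/den_toy).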
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= Nevt) { return;}
set_buffer_amplitudes(re_amps,dirCP_asyms,im_amps,weak_phases,mixing_params,calib_params);
int i0 = row*12;
decision_SSK[0][row] = (int) data[0 + i0];
decision_OS[0][row] = (int) data[1 + i0];
etamistag_SSK[0][row] = data[2 + i0];
etamistag_OS[0][row] = data[3 + i0];
m1[0][row] = data[4 + i0];
m2[0][row] = data[5 + i0];
cos1[0][row] = data[6 + i0];
cos2[0][row] = data[7 + i0];
phi[0][row] = data[8 + i0];
t[0][row] = data[9 + i0];
t_err[0][row] = data[10 + i0];
set_buffer_differential_terms_gen(row);
set_buffer_integral_terms(0,row);
double num_fit_temp = num_fit(0,row);
double den_fit_temp = den_toy(row);
if (num_fit_temp/den_fit_temp<=0) {out[row] = -1.e10;}
else {out[row] = log(num_fit_temp/den_fit_temp);}
}
__global__ void set_mass_params(double *calib_params) {
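// Copies the mass line-shape parameters (resonance parameters mv, ms, mt, gv, gs,
// gt and the S-wave coefficients c1..c9) into device globals.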
mv = calib_params[16];
ms = calib_params[17];
mt = calib_params[18];
gv = calib_params[19];
gs = calib_params[20];
gt = calib_params[21];
c1_mass_swave = calib_params[22];
c2_mass_swave = calib_params[23];
c3_mass_swave = calib_params[24];
c4_mass_swave = calib_params[25];
c5_mass_swave = calib_params[26];
c6_mass_swave = calib_params[27];
c7_mass_swave = calib_params[28];
c8_mass_swave = calib_params[29];
c9_mass_swave = calib_params[30];
}
__global__ void find_max_mass_pdf(int mpdfid, int mintnpoints, double minthlimit, double *mpdfarray) {
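// Tabulates |Mj1j2_unnorm|^2 * phasespace for the (j1,j2) combination selected by
// mpdfid on a regular mintnpoints x mintnpoints grid in (m1,m2), so that its
// maximum can be extracted as an accept-reject envelope.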
int mintindex = threadIdx.x + blockDim.x * blockIdx.x;
if (mintindex >= mintnpoints*mintnpoints) { return;}
int im1 = mintindex / mintnpoints;
int im2 = mintindex % mintnpoints;
double mintstep = (minthlimit-750.)/mintnpoints;
double m1_ = 750.+im1*mintstep;
double m2_ = 750.+im2*mintstep;
if (mpdfid == 0) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,0))*phasespace(m1_,m2_);}
else if (mpdfid == 1) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,1))*phasespace(m1_,m2_);}
else if (mpdfid == 2) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,0))*phasespace(m1_,m2_);}
else if (mpdfid == 3) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,2))*phasespace(m1_,m2_);}
else if (mpdfid == 4) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,0))*phasespace(m1_,m2_);}
else if (mpdfid == 5) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,1))*phasespace(m1_,m2_);}
else if (mpdfid == 6) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,2))*phasespace(m1_,m2_);}
else if (mpdfid == 7) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,1))*phasespace(m1_,m2_);}
else if (mpdfid == 8) {mpdfarray[mintindex] = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,2))*phasespace(m1_,m2_);}
}
__global__ void compute_mass_integral(int mpdfid, int npoints, double minthlimit, double maxmpdf, int *mintarray) {
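// Hit-or-miss estimate of the mass integral: each thread throws a uniform point in
// (m1,m2) and a vertical coordinate below maxmpdf, and records 1 if the point falls
// under the selected mass PDF, 0 otherwise.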
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= npoints) { return;}
curandState state;
curand_init((unsigned long long)clock(), row, 0, &state);
double m1_ = 750.+curand_uniform(&state)*(minthlimit-750.);
double m2_ = 750.+curand_uniform(&state)*(minthlimit-750.);
double vertical_ = curand_uniform(&state)*maxmpdf;
double mpdf_temp = 0;
if (mpdfid == 0) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,0))*phasespace(m1_,m2_);}
else if (mpdfid == 1) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,1))*phasespace(m1_,m2_);}
else if (mpdfid == 2) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,0))*phasespace(m1_,m2_);}
else if (mpdfid == 3) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,2))*phasespace(m1_,m2_);}
else if (mpdfid == 4) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,0))*phasespace(m1_,m2_);}
else if (mpdfid == 5) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,1))*phasespace(m1_,m2_);}
else if (mpdfid == 6) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,2))*phasespace(m1_,m2_);}
else if (mpdfid == 7) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,1))*phasespace(m1_,m2_);}
else if (mpdfid == 8) {mpdf_temp = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,2))*phasespace(m1_,m2_);}
if (vertical_ <= mpdf_temp) {mintarray[row] = 1;}
else {mintarray[row] = 0;}
}
__global__ void compute_mint_array(int mpdfid, int mintnpoints, double minthlimit, double *mpdfarray) {
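// Cell contribution to the mass integral on a regular grid: the integrand averaged
// over the four cell corners times the cell area.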
int mintindex = threadIdx.x + blockDim.x * blockIdx.x;
if (mintindex >= mintnpoints*mintnpoints) { return;}
int im1 = mintindex / mintnpoints;
int im2 = mintindex % mintnpoints;
double mintstep = (minthlimit-750.)/mintnpoints;
double m1_ = 750.+im1*mintstep;
double m2_ = 750.+im2*mintstep;
double m1next_ = 750.+(im1+1)*mintstep;
double m2next_ = 750.+(im2+1)*mintstep;
double point1 = 0.;
double point2 = 0.;
double point3 = 0.;
double point4 = 0.;
if (mpdfid == 0) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,0))*phasespace(m1_,m2_);}
else if (mpdfid == 1) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,1))*phasespace(m1_,m2_);}
else if (mpdfid == 2) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,0))*phasespace(m1_,m2_);}
else if (mpdfid == 3) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,0,2))*phasespace(m1_,m2_);}
else if (mpdfid == 4) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,0))*phasespace(m1_,m2_);}
else if (mpdfid == 5) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,1))*phasespace(m1_,m2_);}
else if (mpdfid == 6) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,1,2))*phasespace(m1_,m2_);}
else if (mpdfid == 7) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,1))*phasespace(m1_,m2_);}
else if (mpdfid == 8) {point1 = pycuda::norm(Mj1j2_unnorm(m1_,m2_,2,2))*phasespace(m1_,m2_);}
if (mpdfid == 0) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,0,0))*phasespace(m1next_,m2_);}
else if (mpdfid == 1) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,0,1))*phasespace(m1next_,m2_);}
else if (mpdfid == 2) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,1,0))*phasespace(m1next_,m2_);}
else if (mpdfid == 3) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,0,2))*phasespace(m1next_,m2_);}
else if (mpdfid == 4) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,2,0))*phasespace(m1next_,m2_);}
else if (mpdfid == 5) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,1,1))*phasespace(m1next_,m2_);}
else if (mpdfid == 6) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,1,2))*phasespace(m1next_,m2_);}
else if (mpdfid == 7) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,2,1))*phasespace(m1next_,m2_);}
else if (mpdfid == 8) {point2 = pycuda::norm(Mj1j2_unnorm(m1next_,m2_,2,2))*phasespace(m1next_,m2_);}
if (mpdfid == 0) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,0,0))*phasespace(m1_,m2next_);}
else if (mpdfid == 1) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,0,1))*phasespace(m1_,m2next_);}
else if (mpdfid == 2) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,1,0))*phasespace(m1_,m2next_);}
else if (mpdfid == 3) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,0,2))*phasespace(m1_,m2next_);}
else if (mpdfid == 4) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,2,0))*phasespace(m1_,m2next_);}
else if (mpdfid == 5) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,1,1))*phasespace(m1_,m2next_);}
else if (mpdfid == 6) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,1,2))*phasespace(m1_,m2next_);}
else if (mpdfid == 7) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,2,1))*phasespace(m1_,m2next_);}
else if (mpdfid == 8) {point3 = pycuda::norm(Mj1j2_unnorm(m1_,m2next_,2,2))*phasespace(m1_,m2next_);}
if (mpdfid == 0) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,0,0))*phasespace(m1next_,m2next_);}
else if (mpdfid == 1) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,0,1))*phasespace(m1next_,m2next_);}
else if (mpdfid == 2) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,1,0))*phasespace(m1next_,m2next_);}
else if (mpdfid == 3) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,0,2))*phasespace(m1next_,m2next_);}
else if (mpdfid == 4) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,2,0))*phasespace(m1next_,m2next_);}
else if (mpdfid == 5) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,1,1))*phasespace(m1next_,m2next_);}
else if (mpdfid == 6) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,1,2))*phasespace(m1next_,m2next_);}
else if (mpdfid == 7) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,2,1))*phasespace(m1next_,m2next_);}
else if (mpdfid == 8) {point4 = pycuda::norm(Mj1j2_unnorm(m1next_,m2next_,2,2))*phasespace(m1next_,m2next_);}
mpdfarray[mintindex] = 0.25*(point1+point2+point3+point4)*mintstep*mintstep;
}
__global__ void set_mass_integrals(double *mass_integrals) {
Im00 = mass_integrals[0];
Im01 = mass_integrals[1];
Im10 = mass_integrals[2];
Im02 = mass_integrals[3];
Im20 = mass_integrals[4];
Im11 = mass_integrals[5];
Im12 = mass_integrals[6];
Im21 = mass_integrals[7];
Im22 = mass_integrals[8];
}
__global__ void compute_nw(double *MCdata, int j1, int j2, int h, int j1p, int j2p, int hp, int part, int NMCevts, double *evout) {
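// Per-MC-event contribution to one normalisation weight: amplitude combination
// times mass term times the angular basis functions looked up via fjjphhpindexdict
// and ghhpindexdict, scaled by the event weight (and a fixed factor of 100).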
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= NMCevts) { return;}
int i0 = row*6;
double m1_ = MCdata[0 + i0];
double m2_ = MCdata[1 + i0];
double cos1_ = MCdata[2 + i0];
double cos2_ = MCdata[3 + i0];
double phi_ = MCdata[4 + i0];
double weight_ = MCdata[5 + i0];
pycuda::complex<double> ev_nw_temp = Nj1j2hj1pj2php(j1,j2,h,j1p,j2p,hp)*Mj1j2(m1_,m2_,j1,j2)*pycuda::conj(Mj1j2(m1_,m2_,j1p,j2p))*phasespace(m1_,m2_)*fi(cos1_,(int) fjjphhpindexdict[j1][j1p][h][hp])*fi(cos2_,(int) fjjphhpindexdict[j2][j2p][h][hp])*gi(phi_,(int) ghhpindexdict[h][hp]);
if (part == 0) {evout[row] = 100.*weight_*pycuda::real(ev_nw_temp);}
else {evout[row] = 100.*weight_*pycuda::imag(ev_nw_temp);}
}
__global__ void set_nw_val(double nwval, int year_opt, int trig_opt, int inw) {
nw_comp_matrix[year_opt][trig_opt][inw] = nwval;
}
__global__ void set_spline_coefs(double a_2011_L0TIS_mod[][4], double a_2011_L0noTIS_mod[][4], double a_2012_L0TIS_mod[][4], double a_2012_L0noTIS_mod[][4]) {
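// Copies the 5x4 spline coefficient blocks for the 2011/2012 L0TIS and L0noTIS
// categories into the corresponding *_wide device arrays.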
for (int ibin=0; ibin<5; ++ibin) {
for (int deg=0; deg<4; ++deg) {
a_2011_L0TIS_wide[ibin][deg] = a_2011_L0TIS_mod[ibin][deg];
a_2011_L0noTIS_wide[ibin][deg] = a_2011_L0noTIS_mod[ibin][deg];
a_2012_L0TIS_wide[ibin][deg] = a_2012_L0TIS_mod[ibin][deg];
a_2012_L0noTIS_wide[ibin][deg] = a_2012_L0noTIS_mod[ibin][deg];
}
}
}
__global__ void compute_nwcov(double *masterevarray, int numofevts, double nwcovout[][336]) {
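// One thread per (inw,jnw) entry of the 336x336 covariance matrix of the
// normalisation weights: sum_k(x_i x_j) - sum_k(x_i)*sum_k(x_j)/N over the
// per-event contributions in masterevarray.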
int nwcovlinindex = threadIdx.x + blockDim.x * blockIdx.x;
if (nwcovlinindex >= 336*336) { return;}
int inw = nwcovlinindex / 336;
int jnw = nwcovlinindex % 336;
double sumi = 0;
double sumj = 0;
double sumij = 0;
for( int kev = 0; kev < numofevts; kev++ ) {
sumi += masterevarray[inw*numofevts+kev];
sumj += masterevarray[jnw*numofevts+kev];
sumij += masterevarray[inw*numofevts+kev]*masterevarray[jnw*numofevts+kev];
}
nwcovout[inw][jnw] = sumij-sumi*sumj/numofevts;
}
__device__ double real_acc_mint(int imint, double ma, double mb) {
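// Real part of the acceptance-weighted mass integrand for index imint: entries
// 0-20 are the cross terms Mj1j2 * conj(Mj1'j2'), 21-29 the squared moduli, each
// weighted by phasespace and the generator-level mass acceptance accGenMass.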
if (imint == 0) {return pycuda::real(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,0,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 1) {return pycuda::real(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,0,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 2) {return pycuda::real(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,1,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 3) {return pycuda::real(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 4) {return pycuda::real(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 5) {return pycuda::real(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,0,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 6) {return pycuda::real(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,1,0)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 7) {return pycuda::real(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,1,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 8) {return pycuda::real(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 9) {return pycuda::real(Mj1j2(ma,mb,1,0)*pycuda::conj(Mj1j2(ma,mb,0,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 10) {return pycuda::real(Mj1j2(ma,mb,1,0)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 11) {return pycuda::real(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 12) {return pycuda::real(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,1,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 13) {return pycuda::real(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 14) {return pycuda::real(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,2,0)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 15) {return pycuda::real(Mj1j2(ma,mb,2,0)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 16) {return pycuda::real(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 17) {return pycuda::real(Mj1j2(ma,mb,1,1)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 18) {return pycuda::real(Mj1j2(ma,mb,1,1)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 19) {return pycuda::real(Mj1j2(ma,mb,1,2)*pycuda::conj(Mj1j2(ma,mb,2,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 20) {return pycuda::real(Mj1j2(ma,mb,1,2)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 21) {return pycuda::real(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,0,0)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 22) {return pycuda::real(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,0,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 23) {return pycuda::real(Mj1j2(ma,mb,1,0)*pycuda::conj(Mj1j2(ma,mb,1,0)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 24) {return pycuda::real(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,0,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 25) {return pycuda::real(Mj1j2(ma,mb,2,0)*pycuda::conj(Mj1j2(ma,mb,2,0)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 26) {return pycuda::real(Mj1j2(ma,mb,1,1)*pycuda::conj(Mj1j2(ma,mb,1,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 27) {return pycuda::real(Mj1j2(ma,mb,1,2)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 28) {return pycuda::real(Mj1j2(ma,mb,2,1)*pycuda::conj(Mj1j2(ma,mb,2,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 29) {return pycuda::real(Mj1j2(ma,mb,2,2)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
return 0.;
}
__device__ double imag_acc_mint(int imint, double ma, double mb) {
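// Imaginary counterparts of the cross terms in real_acc_mint (the squared-modulus
// terms are real and therefore omitted).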
if (imint == 0) {return pycuda::imag(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,0,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 1) {return pycuda::imag(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,0,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 2) {return pycuda::imag(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,1,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 3) {return pycuda::imag(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 4) {return pycuda::imag(Mj1j2(ma,mb,0,0)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 5) {return pycuda::imag(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,0,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 6) {return pycuda::imag(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,1,0)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 7) {return pycuda::imag(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,1,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 8) {return pycuda::imag(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 9) {return pycuda::imag(Mj1j2(ma,mb,1,0)*pycuda::conj(Mj1j2(ma,mb,0,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 10) {return pycuda::imag(Mj1j2(ma,mb,1,0)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 11) {return pycuda::imag(Mj1j2(ma,mb,0,1)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 12) {return pycuda::imag(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,1,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 13) {return pycuda::imag(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 14) {return pycuda::imag(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,2,0)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 15) {return pycuda::imag(Mj1j2(ma,mb,2,0)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 16) {return pycuda::imag(Mj1j2(ma,mb,0,2)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 17) {return pycuda::imag(Mj1j2(ma,mb,1,1)*pycuda::conj(Mj1j2(ma,mb,1,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 18) {return pycuda::imag(Mj1j2(ma,mb,1,1)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 19) {return pycuda::imag(Mj1j2(ma,mb,1,2)*pycuda::conj(Mj1j2(ma,mb,2,1)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
else if (imint == 20) {return pycuda::imag(Mj1j2(ma,mb,1,2)*pycuda::conj(Mj1j2(ma,mb,2,2)))*phasespace(ma,mb)*accGenMass(ma)*accGenMass(mb);}
return 0.;
}
__global__ void compute_acc_mint_array(int imint, int part, int mintnpoints, double minthlimit, double *mpdfarray) {
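// Same four-corner cell average as compute_mint_array, applied to the
// acceptance-weighted real (part == 0) or imaginary integrand.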
int mintindex = threadIdx.x + blockDim.x * blockIdx.x;
if (mintindex >= mintnpoints*mintnpoints) { return;}
int im1 = mintindex / mintnpoints;
int im2 = mintindex % mintnpoints;
double mintstep = (minthlimit-750.)/mintnpoints;
double m1_ = 750.+im1*mintstep;
double m2_ = 750.+im2*mintstep;
double m1next_ = 750.+(im1+1)*mintstep;
double m2next_ = 750.+(im2+1)*mintstep;
double point1 = 0.;
double point2 = 0.;
double point3 = 0.;
double point4 = 0.;
if (part == 0) {
point1 = real_acc_mint(imint,m1_,m2_);
point2 = real_acc_mint(imint,m1next_,m2_);
point3 = real_acc_mint(imint,m1_,m2next_);
point4 = real_acc_mint(imint,m1next_,m2next_);
}
else {
point1 = imag_acc_mint(imint,m1_,m2_);
point2 = imag_acc_mint(imint,m1next_,m2_);
point3 = imag_acc_mint(imint,m1_,m2next_);
point4 = imag_acc_mint(imint,m1next_,m2next_);
}
mpdfarray[mintindex] = 0.25*(point1+point2+point3+point4)*mintstep*mintstep;
}
__global__ void set_all_mass_integrals(double *mass_integrals) {
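// Same as the mass-integral section of set_generator: loads the Im.. and Ih..
// integrals and rebuilds the Ih_{j1 j2 j1' j2'} lookup tables.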
Im00 = mass_integrals[0];
Im01 = mass_integrals[1];
Im10 = mass_integrals[2];
Im02 = mass_integrals[3];
Im20 = mass_integrals[4];
Im11 = mass_integrals[5];
Im12 = mass_integrals[6];
Im21 = mass_integrals[7];
Im22 = mass_integrals[8];
Ih1Re = mass_integrals[9];
Ih2Re = mass_integrals[10];
Ih3Re = mass_integrals[11];
Ih4Re = mass_integrals[12];
Ih5Re = mass_integrals[13];
Ih6Re = mass_integrals[14];
Ih7Re = mass_integrals[15];
Ih8Re = mass_integrals[16];
Ih9Re = mass_integrals[17];
Ih10Re = mass_integrals[18];
Ih11Re = mass_integrals[19];
Ih12Re = mass_integrals[20];
Ih13Re = mass_integrals[21];
Ih14Re = mass_integrals[22];
Ih15Re = mass_integrals[23];
Ih16Re = mass_integrals[24];
Ih17Re = mass_integrals[25];
Ih18Re = mass_integrals[26];
Ih19Re = mass_integrals[27];
Ih20Re = mass_integrals[28];
Ih21Re = mass_integrals[29];
Ih22Re = mass_integrals[30];
Ih23Re = mass_integrals[31];
Ih24Re = mass_integrals[32];
Ih25Re = mass_integrals[33];
Ih26Re = mass_integrals[34];
Ih27Re = mass_integrals[35];
Ih28Re = mass_integrals[36];
Ih29Re = mass_integrals[37];
Ih30Re = mass_integrals[38];
Ih1Im = mass_integrals[39];
Ih2Im = mass_integrals[40];
Ih3Im = mass_integrals[41];
Ih4Im = mass_integrals[42];
Ih5Im = mass_integrals[43];
Ih6Im = mass_integrals[44];
Ih7Im = mass_integrals[45];
Ih8Im = mass_integrals[46];
Ih9Im = mass_integrals[47];
Ih10Im = mass_integrals[48];
Ih11Im = mass_integrals[49];
Ih12Im = mass_integrals[50];
Ih13Im = mass_integrals[51];
Ih14Im = mass_integrals[52];
Ih15Im = mass_integrals[53];
Ih16Im = mass_integrals[54];
Ih17Im = mass_integrals[55];
Ih18Im = mass_integrals[56];
Ih19Im = mass_integrals[57];
Ih20Im = mass_integrals[58];
Ih21Im = mass_integrals[59];
reIhj1j2j1pj2pdict[0][0][0][0] = pycuda::real(pycuda::complex<double>(Ih22Re,0.));
reIhj1j2j1pj2pdict[0][0][0][1] = pycuda::real(pycuda::complex<double>(Ih1Re,Ih1Im));
reIhj1j2j1pj2pdict[0][0][0][2] = pycuda::real(pycuda::complex<double>(Ih2Re,Ih2Im));
reIhj1j2j1pj2pdict[0][0][1][0] = pycuda::real(pycuda::complex<double>(Ih1Re,Ih1Im));
reIhj1j2j1pj2pdict[0][0][1][1] = pycuda::real(pycuda::complex<double>(Ih3Re,Ih3Im));
reIhj1j2j1pj2pdict[0][0][1][2] = pycuda::real(pycuda::complex<double>(Ih4Re,Ih4Im));
reIhj1j2j1pj2pdict[0][0][2][0] = pycuda::real(pycuda::complex<double>(Ih2Re,Ih2Im));
reIhj1j2j1pj2pdict[0][0][2][1] = pycuda::real(pycuda::complex<double>(Ih4Re,Ih4Im));
reIhj1j2j1pj2pdict[0][0][2][2] = pycuda::real(pycuda::complex<double>(Ih5Re,Ih5Im));
reIhj1j2j1pj2pdict[0][1][0][0] = pycuda::real(pycuda::complex<double>(Ih1Re,-Ih1Im));
reIhj1j2j1pj2pdict[0][1][0][1] = pycuda::real(pycuda::complex<double>(Ih23Re,0.));
reIhj1j2j1pj2pdict[0][1][0][2] = pycuda::real(pycuda::complex<double>(Ih6Re,Ih6Im));
reIhj1j2j1pj2pdict[0][1][1][0] = pycuda::real(pycuda::complex<double>(Ih7Re,Ih7Im));
reIhj1j2j1pj2pdict[0][1][1][1] = pycuda::real(pycuda::complex<double>(Ih8Re,Ih8Im));
reIhj1j2j1pj2pdict[0][1][1][2] = pycuda::real(pycuda::complex<double>(Ih9Re,Ih9Im));
reIhj1j2j1pj2pdict[0][1][2][0] = pycuda::real(pycuda::complex<double>(Ih10Re,Ih10Im));
reIhj1j2j1pj2pdict[0][1][2][1] = pycuda::real(pycuda::complex<double>(Ih11Re,Ih11Im));
reIhj1j2j1pj2pdict[0][1][2][2] = pycuda::real(pycuda::complex<double>(Ih12Re,Ih12Im));
reIhj1j2j1pj2pdict[0][2][0][0] = pycuda::real(pycuda::complex<double>(Ih2Re,-Ih2Im));
reIhj1j2j1pj2pdict[0][2][0][1] = pycuda::real(pycuda::complex<double>(Ih6Re,-Ih6Im));
reIhj1j2j1pj2pdict[0][2][0][2] = pycuda::real(pycuda::complex<double>(Ih25Re,0.));
reIhj1j2j1pj2pdict[0][2][1][0] = pycuda::real(pycuda::complex<double>(Ih10Re,-Ih10Im));
reIhj1j2j1pj2pdict[0][2][1][1] = pycuda::real(pycuda::complex<double>(Ih13Re,Ih13Im));
reIhj1j2j1pj2pdict[0][2][1][2] = pycuda::real(pycuda::complex<double>(Ih14Re,Ih14Im));
reIhj1j2j1pj2pdict[0][2][2][0] = pycuda::real(pycuda::complex<double>(Ih15Re,Ih15Im));
reIhj1j2j1pj2pdict[0][2][2][1] = pycuda::real(pycuda::complex<double>(Ih16Re,Ih16Im));
reIhj1j2j1pj2pdict[0][2][2][2] = pycuda::real(pycuda::complex<double>(Ih17Re,Ih17Im));
reIhj1j2j1pj2pdict[1][0][0][0] = pycuda::real(pycuda::complex<double>(Ih1Re,-Ih1Im));
reIhj1j2j1pj2pdict[1][0][0][1] = pycuda::real(pycuda::complex<double>(Ih7Re,Ih7Im));
reIhj1j2j1pj2pdict[1][0][0][2] = pycuda::real(pycuda::complex<double>(Ih10Re,Ih10Im));
reIhj1j2j1pj2pdict[1][0][1][0] = pycuda::real(pycuda::complex<double>(Ih24Re,0.));
reIhj1j2j1pj2pdict[1][0][1][1] = pycuda::real(pycuda::complex<double>(Ih8Re,Ih8Im));
reIhj1j2j1pj2pdict[1][0][1][2] = pycuda::real(pycuda::complex<double>(Ih11Re,Ih11Im));
reIhj1j2j1pj2pdict[1][0][2][0] = pycuda::real(pycuda::complex<double>(Ih6Re,Ih6Im));
reIhj1j2j1pj2pdict[1][0][2][1] = pycuda::real(pycuda::complex<double>(Ih9Re,Ih9Im));
reIhj1j2j1pj2pdict[1][0][2][2] = pycuda::real(pycuda::complex<double>(Ih12Re,Ih12Im));
reIhj1j2j1pj2pdict[1][1][0][0] = pycuda::real(pycuda::complex<double>(Ih3Re,-Ih3Im));
reIhj1j2j1pj2pdict[1][1][0][1] = pycuda::real(pycuda::complex<double>(Ih8Re,-Ih8Im));
reIhj1j2j1pj2pdict[1][1][0][2] = pycuda::real(pycuda::complex<double>(Ih13Re,-Ih13Im));
reIhj1j2j1pj2pdict[1][1][1][0] = pycuda::real(pycuda::complex<double>(Ih8Re,-Ih8Im));
reIhj1j2j1pj2pdict[1][1][1][1] = pycuda::real(pycuda::complex<double>(Ih27Re,0.));
reIhj1j2j1pj2pdict[1][1][1][2] = pycuda::real(pycuda::complex<double>(Ih18Re,Ih18Im));
reIhj1j2j1pj2pdict[1][1][2][0] = pycuda::real(pycuda::complex<double>(Ih13Re,-Ih13Im));
reIhj1j2j1pj2pdict[1][1][2][1] = pycuda::real(pycuda::complex<double>(Ih18Re,Ih18Im));
reIhj1j2j1pj2pdict[1][1][2][2] = pycuda::real(pycuda::complex<double>(Ih19Re,Ih19Im));
reIhj1j2j1pj2pdict[1][2][0][0] = pycuda::real(pycuda::complex<double>(Ih4Re,-Ih4Im));
reIhj1j2j1pj2pdict[1][2][0][1] = pycuda::real(pycuda::complex<double>(Ih9Re,-Ih9Im));
reIhj1j2j1pj2pdict[1][2][0][2] = pycuda::real(pycuda::complex<double>(Ih14Re,-Ih14Im));
reIhj1j2j1pj2pdict[1][2][1][0] = pycuda::real(pycuda::complex<double>(Ih11Re,-Ih11Im));
reIhj1j2j1pj2pdict[1][2][1][1] = pycuda::real(pycuda::complex<double>(Ih18Re,-Ih18Im));
reIhj1j2j1pj2pdict[1][2][1][2] = pycuda::real(pycuda::complex<double>(Ih28Re,0.));
reIhj1j2j1pj2pdict[1][2][2][0] = pycuda::real(pycuda::complex<double>(Ih16Re,-Ih16Im));
reIhj1j2j1pj2pdict[1][2][2][1] = pycuda::real(pycuda::complex<double>(Ih20Re,Ih20Im));
reIhj1j2j1pj2pdict[1][2][2][2] = pycuda::real(pycuda::complex<double>(Ih21Re,Ih21Im));
reIhj1j2j1pj2pdict[2][0][0][0] = pycuda::real(pycuda::complex<double>(Ih2Re,-Ih2Im));
reIhj1j2j1pj2pdict[2][0][0][1] = pycuda::real(pycuda::complex<double>(Ih10Re,-Ih10Im));
reIhj1j2j1pj2pdict[2][0][0][2] = pycuda::real(pycuda::complex<double>(Ih15Re,Ih15Im));
reIhj1j2j1pj2pdict[2][0][1][0] = pycuda::real(pycuda::complex<double>(Ih6Re,-Ih6Im));
reIhj1j2j1pj2pdict[2][0][1][1] = pycuda::real(pycuda::complex<double>(Ih13Re,Ih13Im));
reIhj1j2j1pj2pdict[2][0][1][2] = pycuda::real(pycuda::complex<double>(Ih16Re,Ih16Im));
reIhj1j2j1pj2pdict[2][0][2][0] = pycuda::real(pycuda::complex<double>(Ih26Re,0.));
reIhj1j2j1pj2pdict[2][0][2][1] = pycuda::real(pycuda::complex<double>(Ih14Re,Ih14Im));
reIhj1j2j1pj2pdict[2][0][2][2] = pycuda::real(pycuda::complex<double>(Ih17Re,Ih17Im));
reIhj1j2j1pj2pdict[2][1][0][0] = pycuda::real(pycuda::complex<double>(Ih4Re,-Ih4Im));
reIhj1j2j1pj2pdict[2][1][0][1] = pycuda::real(pycuda::complex<double>(Ih11Re,-Ih11Im));
reIhj1j2j1pj2pdict[2][1][0][2] = pycuda::real(pycuda::complex<double>(Ih16Re,-Ih16Im));
reIhj1j2j1pj2pdict[2][1][1][0] = pycuda::real(pycuda::complex<double>(Ih9Re,-Ih9Im));
reIhj1j2j1pj2pdict[2][1][1][1] = pycuda::real(pycuda::complex<double>(Ih18Re,-Ih18Im));
reIhj1j2j1pj2pdict[2][1][1][2] = pycuda::real(pycuda::complex<double>(Ih20Re,Ih20Im));
reIhj1j2j1pj2pdict[2][1][2][0] = pycuda::real(pycuda::complex<double>(Ih14Re,-Ih14Im));
reIhj1j2j1pj2pdict[2][1][2][1] = pycuda::real(pycuda::complex<double>(Ih29Re,0.));
reIhj1j2j1pj2pdict[2][1][2][2] = pycuda::real(pycuda::complex<double>(Ih21Re,Ih21Im));
reIhj1j2j1pj2pdict[2][2][0][0] = pycuda::real(pycuda::complex<double>(Ih5Re,-Ih5Im));
reIhj1j2j1pj2pdict[2][2][0][1] = pycuda::real(pycuda::complex<double>(Ih12Re,-Ih12Im));
reIhj1j2j1pj2pdict[2][2][0][2] = pycuda::real(pycuda::complex<double>(Ih17Re,-Ih17Im));
reIhj1j2j1pj2pdict[2][2][1][0] = pycuda::real(pycuda::complex<double>(Ih12Re,-Ih12Im));
reIhj1j2j1pj2pdict[2][2][1][1] = pycuda::real(pycuda::complex<double>(Ih19Re,-Ih19Im));
reIhj1j2j1pj2pdict[2][2][1][2] = pycuda::real(pycuda::complex<double>(Ih21Re,-Ih21Im));
reIhj1j2j1pj2pdict[2][2][2][0] = pycuda::real(pycuda::complex<double>(Ih17Re,-Ih17Im));
reIhj1j2j1pj2pdict[2][2][2][1] = pycuda::real(pycuda::complex<double>(Ih21Re,-Ih21Im));
reIhj1j2j1pj2pdict[2][2][2][2] = pycuda::real(pycuda::complex<double>(Ih30Re,0.));
imIhj1j2j1pj2pdict[0][0][0][0] = pycuda::imag(pycuda::complex<double>(Ih22Re,0.));
imIhj1j2j1pj2pdict[0][0][0][1] = pycuda::imag(pycuda::complex<double>(Ih1Re,Ih1Im));
imIhj1j2j1pj2pdict[0][0][0][2] = pycuda::imag(pycuda::complex<double>(Ih2Re,Ih2Im));
imIhj1j2j1pj2pdict[0][0][1][0] = pycuda::imag(pycuda::complex<double>(Ih1Re,Ih1Im));
imIhj1j2j1pj2pdict[0][0][1][1] = pycuda::imag(pycuda::complex<double>(Ih3Re,Ih3Im));
imIhj1j2j1pj2pdict[0][0][1][2] = pycuda::imag(pycuda::complex<double>(Ih4Re,Ih4Im));
imIhj1j2j1pj2pdict[0][0][2][0] = pycuda::imag(pycuda::complex<double>(Ih2Re,Ih2Im));
imIhj1j2j1pj2pdict[0][0][2][1] = pycuda::imag(pycuda::complex<double>(Ih4Re,Ih4Im));
imIhj1j2j1pj2pdict[0][0][2][2] = pycuda::imag(pycuda::complex<double>(Ih5Re,Ih5Im));
imIhj1j2j1pj2pdict[0][1][0][0] = pycuda::imag(pycuda::complex<double>(Ih1Re,-Ih1Im));
imIhj1j2j1pj2pdict[0][1][0][1] = pycuda::imag(pycuda::complex<double>(Ih23Re,0.));
imIhj1j2j1pj2pdict[0][1][0][2] = pycuda::imag(pycuda::complex<double>(Ih6Re,Ih6Im));
imIhj1j2j1pj2pdict[0][1][1][0] = pycuda::imag(pycuda::complex<double>(Ih7Re,Ih7Im));
imIhj1j2j1pj2pdict[0][1][1][1] = pycuda::imag(pycuda::complex<double>(Ih8Re,Ih8Im));
imIhj1j2j1pj2pdict[0][1][1][2] = pycuda::imag(pycuda::complex<double>(Ih9Re,Ih9Im));
imIhj1j2j1pj2pdict[0][1][2][0] = pycuda::imag(pycuda::complex<double>(Ih10Re,Ih10Im));
imIhj1j2j1pj2pdict[0][1][2][1] = pycuda::imag(pycuda::complex<double>(Ih11Re,Ih11Im));
imIhj1j2j1pj2pdict[0][1][2][2] = pycuda::imag(pycuda::complex<double>(Ih12Re,Ih12Im));
imIhj1j2j1pj2pdict[0][2][0][0] = pycuda::imag(pycuda::complex<double>(Ih2Re,-Ih2Im));
imIhj1j2j1pj2pdict[0][2][0][1] = pycuda::imag(pycuda::complex<double>(Ih6Re,-Ih6Im));
imIhj1j2j1pj2pdict[0][2][0][2] = pycuda::imag(pycuda::complex<double>(Ih25Re,0.));
imIhj1j2j1pj2pdict[0][2][1][0] = pycuda::imag(pycuda::complex<double>(Ih10Re,-Ih10Im));
imIhj1j2j1pj2pdict[0][2][1][1] = pycuda::imag(pycuda::complex<double>(Ih13Re,Ih13Im));
imIhj1j2j1pj2pdict[0][2][1][2] = pycuda::imag(pycuda::complex<double>(Ih14Re,Ih14Im));
imIhj1j2j1pj2pdict[0][2][2][0] = pycuda::imag(pycuda::complex<double>(Ih15Re,Ih15Im));
imIhj1j2j1pj2pdict[0][2][2][1] = pycuda::imag(pycuda::complex<double>(Ih16Re,Ih16Im));
imIhj1j2j1pj2pdict[0][2][2][2] = pycuda::imag(pycuda::complex<double>(Ih17Re,Ih17Im));
imIhj1j2j1pj2pdict[1][0][0][0] = pycuda::imag(pycuda::complex<double>(Ih1Re,-Ih1Im));
imIhj1j2j1pj2pdict[1][0][0][1] = pycuda::imag(pycuda::complex<double>(Ih7Re,Ih7Im));
imIhj1j2j1pj2pdict[1][0][0][2] = pycuda::imag(pycuda::complex<double>(Ih10Re,Ih10Im));
imIhj1j2j1pj2pdict[1][0][1][0] = pycuda::imag(pycuda::complex<double>(Ih24Re,0.));
imIhj1j2j1pj2pdict[1][0][1][1] = pycuda::imag(pycuda::complex<double>(Ih8Re,Ih8Im));
imIhj1j2j1pj2pdict[1][0][1][2] = pycuda::imag(pycuda::complex<double>(Ih11Re,Ih11Im));
imIhj1j2j1pj2pdict[1][0][2][0] = pycuda::imag(pycuda::complex<double>(Ih6Re,Ih6Im));
imIhj1j2j1pj2pdict[1][0][2][1] = pycuda::imag(pycuda::complex<double>(Ih9Re,Ih9Im));
imIhj1j2j1pj2pdict[1][0][2][2] = pycuda::imag(pycuda::complex<double>(Ih12Re,Ih12Im));
imIhj1j2j1pj2pdict[1][1][0][0] = pycuda::imag(pycuda::complex<double>(Ih3Re,-Ih3Im));
imIhj1j2j1pj2pdict[1][1][0][1] = pycuda::imag(pycuda::complex<double>(Ih8Re,-Ih8Im));
imIhj1j2j1pj2pdict[1][1][0][2] = pycuda::imag(pycuda::complex<double>(Ih13Re,-Ih13Im));
imIhj1j2j1pj2pdict[1][1][1][0] = pycuda::imag(pycuda::complex<double>(Ih8Re,-Ih8Im));
imIhj1j2j1pj2pdict[1][1][1][1] = pycuda::imag(pycuda::complex<double>(Ih27Re,0.));
imIhj1j2j1pj2pdict[1][1][1][2] = pycuda::imag(pycuda::complex<double>(Ih18Re,Ih18Im));
imIhj1j2j1pj2pdict[1][1][2][0] = pycuda::imag(pycuda::complex<double>(Ih13Re,-Ih13Im));
imIhj1j2j1pj2pdict[1][1][2][1] = pycuda::imag(pycuda::complex<double>(Ih18Re,Ih18Im));
imIhj1j2j1pj2pdict[1][1][2][2] = pycuda::imag(pycuda::complex<double>(Ih19Re,Ih19Im));
imIhj1j2j1pj2pdict[1][2][0][0] = pycuda::imag(pycuda::complex<double>(Ih4Re,-Ih4Im));
imIhj1j2j1pj2pdict[1][2][0][1] = pycuda::imag(pycuda::complex<double>(Ih9Re,-Ih9Im));
imIhj1j2j1pj2pdict[1][2][0][2] = pycuda::imag(pycuda::complex<double>(Ih14Re,-Ih14Im));
imIhj1j2j1pj2pdict[1][2][1][0] = pycuda::imag(pycuda::complex<double>(Ih11Re,-Ih11Im));
imIhj1j2j1pj2pdict[1][2][1][1] = pycuda::imag(pycuda::complex<double>(Ih18Re,-Ih18Im));
imIhj1j2j1pj2pdict[1][2][1][2] = pycuda::imag(pycuda::complex<double>(Ih28Re,0.));
imIhj1j2j1pj2pdict[1][2][2][0] = pycuda::imag(pycuda::complex<double>(Ih16Re,-Ih16Im));
imIhj1j2j1pj2pdict[1][2][2][1] = pycuda::imag(pycuda::complex<double>(Ih20Re,Ih20Im));
imIhj1j2j1pj2pdict[1][2][2][2] = pycuda::imag(pycuda::complex<double>(Ih21Re,Ih21Im));
imIhj1j2j1pj2pdict[2][0][0][0] = pycuda::imag(pycuda::complex<double>(Ih2Re,-Ih2Im));
imIhj1j2j1pj2pdict[2][0][0][1] = pycuda::imag(pycuda::complex<double>(Ih10Re,-Ih10Im));
imIhj1j2j1pj2pdict[2][0][0][2] = pycuda::imag(pycuda::complex<double>(Ih15Re,Ih15Im));
imIhj1j2j1pj2pdict[2][0][1][0] = pycuda::imag(pycuda::complex<double>(Ih6Re,-Ih6Im));
imIhj1j2j1pj2pdict[2][0][1][1] = pycuda::imag(pycuda::complex<double>(Ih13Re,Ih13Im));
imIhj1j2j1pj2pdict[2][0][1][2] = pycuda::imag(pycuda::complex<double>(Ih16Re,Ih16Im));
imIhj1j2j1pj2pdict[2][0][2][0] = pycuda::imag(pycuda::complex<double>(Ih26Re,0.));
imIhj1j2j1pj2pdict[2][0][2][1] = pycuda::imag(pycuda::complex<double>(Ih14Re,Ih14Im));
imIhj1j2j1pj2pdict[2][0][2][2] = pycuda::imag(pycuda::complex<double>(Ih17Re,Ih17Im));
imIhj1j2j1pj2pdict[2][1][0][0] = pycuda::imag(pycuda::complex<double>(Ih4Re,-Ih4Im));
imIhj1j2j1pj2pdict[2][1][0][1] = pycuda::imag(pycuda::complex<double>(Ih11Re,-Ih11Im));
imIhj1j2j1pj2pdict[2][1][0][2] = pycuda::imag(pycuda::complex<double>(Ih16Re,-Ih16Im));
imIhj1j2j1pj2pdict[2][1][1][0] = pycuda::imag(pycuda::complex<double>(Ih9Re,-Ih9Im));
imIhj1j2j1pj2pdict[2][1][1][1] = pycuda::imag(pycuda::complex<double>(Ih18Re,-Ih18Im));
imIhj1j2j1pj2pdict[2][1][1][2] = pycuda::imag(pycuda::complex<double>(Ih20Re,Ih20Im));
imIhj1j2j1pj2pdict[2][1][2][0] = pycuda::imag(pycuda::complex<double>(Ih14Re,-Ih14Im));
imIhj1j2j1pj2pdict[2][1][2][1] = pycuda::imag(pycuda::complex<double>(Ih29Re,0.));
imIhj1j2j1pj2pdict[2][1][2][2] = pycuda::imag(pycuda::complex<double>(Ih21Re,Ih21Im));
imIhj1j2j1pj2pdict[2][2][0][0] = pycuda::imag(pycuda::complex<double>(Ih5Re,-Ih5Im));
imIhj1j2j1pj2pdict[2][2][0][1] = pycuda::imag(pycuda::complex<double>(Ih12Re,-Ih12Im));
imIhj1j2j1pj2pdict[2][2][0][2] = pycuda::imag(pycuda::complex<double>(Ih17Re,-Ih17Im));
imIhj1j2j1pj2pdict[2][2][1][0] = pycuda::imag(pycuda::complex<double>(Ih12Re,-Ih12Im));
imIhj1j2j1pj2pdict[2][2][1][1] = pycuda::imag(pycuda::complex<double>(Ih19Re,-Ih19Im));
imIhj1j2j1pj2pdict[2][2][1][2] = pycuda::imag(pycuda::complex<double>(Ih21Re,-Ih21Im));
imIhj1j2j1pj2pdict[2][2][2][0] = pycuda::imag(pycuda::complex<double>(Ih17Re,-Ih17Im));
imIhj1j2j1pj2pdict[2][2][2][1] = pycuda::imag(pycuda::complex<double>(Ih21Re,-Ih21Im));
imIhj1j2j1pj2pdict[2][2][2][2] = pycuda::imag(pycuda::complex<double>(Ih30Re,0.));
}
__device__ void set_buffer_rew_terms(double *mass_integrals, int iev) {
Im00 = mass_integrals[0];
Im01 = mass_integrals[1];
Im10 = mass_integrals[2];
Im02 = mass_integrals[3];
Im20 = mass_integrals[4];
Im11 = mass_integrals[5];
Im12 = mass_integrals[6];
Im21 = mass_integrals[7];
Im22 = mass_integrals[8];
IT_cosh_MCrew[iev] = 4.*gamma_Bs_freq/(-pow(delta_gamma_freq,2) + 4.*pow(gamma_Bs_freq,2));
IT_sinh_MCrew[iev] = 2.*delta_gamma_freq/(-pow(delta_gamma_freq,2) + 4.*pow(gamma_Bs_freq,2));
IT_cos_MCrew[iev] = gamma_Bs_freq/(pow(delta_m_freq,2) + pow(gamma_Bs_freq,2));
IT_sin_MCrew[iev] = delta_m_freq/(pow(delta_m_freq,2) + pow(gamma_Bs_freq,2));
for (int i=0; i<18; ++i) {fi_cos1_MCrew[i][iev] = fi(cos1_MCrew[iev],i+1);}
for (int i=0; i<18; ++i) {fi_cos2_MCrew[i][iev] = fi(cos2_MCrew[iev],i+1);}
for (int i=0; i<15; ++i) {gi_MCrew[i][iev] = gi(phi_MCrew[iev],i+1);}
for (int j1=0; j1<3; ++j1) {
for (int j2=0; j2<3; ++j2) {
pycuda::complex<double> M_temp = Mj1j2(m1_MCrew[iev],m2_MCrew[iev],j1,j2);
reMj1j2_MCrew[j1][j2][iev] = pycuda::real(M_temp);
imMj1j2_MCrew[j1][j2][iev] = pycuda::imag(M_temp);
}
}
phasespace_MCrew[iev] = phasespace(m1_MCrew[iev],m2_MCrew[iev]);
}
__device__ pycuda::complex<double> hj1j2j1pj2p_MCrew(int j1, int j2, int j1p, int j2p, int iev) {
return Mj1j2_MCrew(j1,j2,iev)*pycuda::conj(Mj1j2_MCrew(j1p,j2p,iev))*phasespace_MCrew[iev];
}
__device__ double fjjphhp_cos1_MCrew(int j, int jp, int h, int hp, int iev) {
return fi_cos1_MCrew[(int) fjjphhpindexdict[j][jp][h][hp]-1][iev];
}
__device__ double fjjphhp_cos2_MCrew(int j, int jp, int h, int hp, int iev) {
return fi_cos2_MCrew[(int) fjjphhpindexdict[j][jp][h][hp]-1][iev];
}
__device__ double ghhp_phi_MCrew(int h, int hp, int iev) {
return gi_MCrew[(int) ghhpindexdict[h][hp]-1][iev];
}
__device__ double comp_rew_phys_model(int j1, int j2, int h, int j1p, int j2p, int hp, int iev) {
return pycuda::real(((IT_cosh_MCrew[iev]*M_Average(j1,j2,h,j1p,j2p,hp)-IT_sinh_MCrew[iev]*M_DeltaGamma(j1,j2,h,j1p,j2p,hp))+DCP_prod*(IT_cos_MCrew[iev]*M_DirCP(j1,j2,h,j1p,j2p,hp)+IT_sin_MCrew[iev]*M_MixCP(j1,j2,h,j1p,j2p,hp)))*Nj1j2hj1pj2php(j1,j2,h,j1p,j2p,hp)*hj1j2j1pj2p_MCrew(j1,j2,j1p,j2p,iev))*ghhp_phi_MCrew(h,hp,iev)*fjjphhp_cos1_MCrew(j1,j1p,h,hp,iev)*fjjphhp_cos2_MCrew(j2,j2p,h,hp,iev);
}
__device__ double rew_phys_model(int iev) {
return comp_rew_phys_model(0,0,0,0,0,0,iev)+comp_rew_phys_model(0,1,0,0,1,0,iev)+comp_rew_phys_model(0,2,0,0,2,0,iev)+comp_rew_phys_model(1,0,0,1,0,0,iev)+comp_rew_phys_model(1,1,0,1,1,0,iev)+comp_rew_phys_model(1,1,1,1,1,1,iev)
+comp_rew_phys_model(1,1,2,1,1,2,iev)+comp_rew_phys_model(1,2,0,1,2,0,iev)+comp_rew_phys_model(1,2,1,1,2,1,iev)+comp_rew_phys_model(1,2,2,1,2,2,iev)+comp_rew_phys_model(2,0,0,2,0,0,iev)+comp_rew_phys_model(2,1,0,2,1,0,iev)+comp_rew_phys_model(2,1,1,2,1,1,iev)
+comp_rew_phys_model(2,1,2,2,1,2,iev)+comp_rew_phys_model(2,2,0,2,2,0,iev)+comp_rew_phys_model(2,2,1,2,2,1,iev)+comp_rew_phys_model(2,2,2,2,2,2,iev)+comp_rew_phys_model(2,2,3,2,2,3,iev)+comp_rew_phys_model(2,2,4,2,2,4,iev)+2.*comp_rew_phys_model(0,1,0,0,0,0,iev)
+2.*comp_rew_phys_model(0,1,0,1,0,0,iev)+2.*comp_rew_phys_model(0,1,0,2,0,0,iev)+2.*comp_rew_phys_model(0,2,0,0,0,0,iev)+2.*comp_rew_phys_model(0,2,0,0,1,0,iev)+2.*comp_rew_phys_model(0,2,0,1,0,0,iev)+2.*comp_rew_phys_model(0,2,0,1,1,0,iev)
+2.*comp_rew_phys_model(0,2,0,2,0,0,iev)+2.*comp_rew_phys_model(0,2,0,2,1,0,iev)+2.*comp_rew_phys_model(1,0,0,0,0,0,iev)+2.*comp_rew_phys_model(1,1,0,0,0,0,iev)+2.*comp_rew_phys_model(1,1,0,0,1,0,iev)+2.*comp_rew_phys_model(1,1,0,1,0,0,iev)
+2.*comp_rew_phys_model(1,1,0,2,0,0,iev)+2.*comp_rew_phys_model(1,1,1,0,0,0,iev)+2.*comp_rew_phys_model(1,1,1,0,1,0,iev)+2.*comp_rew_phys_model(1,1,1,0,2,0,iev)+2.*comp_rew_phys_model(1,1,1,1,0,0,iev)+2.*comp_rew_phys_model(1,1,1,1,1,0,iev)
+2.*comp_rew_phys_model(1,1,1,1,2,0,iev)+2.*comp_rew_phys_model(1,1,1,2,0,0,iev)+2.*comp_rew_phys_model(1,1,1,2,1,0,iev)+2.*comp_rew_phys_model(1,1,1,2,2,0,iev)+2.*comp_rew_phys_model(1,1,2,0,0,0,iev)+2.*comp_rew_phys_model(1,1,2,0,1,0,iev)
+2.*comp_rew_phys_model(1,1,2,0,2,0,iev)+2.*comp_rew_phys_model(1,1,2,1,0,0,iev)+2.*comp_rew_phys_model(1,1,2,1,1,0,iev)+2.*comp_rew_phys_model(1,1,2,1,1,1,iev)+2.*comp_rew_phys_model(1,1,2,1,2,0,iev)+2.*comp_rew_phys_model(1,1,2,1,2,1,iev)
+2.*comp_rew_phys_model(1,1,2,2,0,0,iev)+2.*comp_rew_phys_model(1,1,2,2,1,0,iev)+2.*comp_rew_phys_model(1,1,2,2,1,1,iev)+2.*comp_rew_phys_model(1,1,2,2,2,0,iev)+2.*comp_rew_phys_model(1,1,2,2,2,1,iev)+2.*comp_rew_phys_model(1,2,0,0,0,0,iev)
+2.*comp_rew_phys_model(1,2,0,0,1,0,iev)+2.*comp_rew_phys_model(1,2,0,0,2,0,iev)+2.*comp_rew_phys_model(1,2,0,1,0,0,iev)+2.*comp_rew_phys_model(1,2,0,1,1,0,iev)+2.*comp_rew_phys_model(1,2,0,2,0,0,iev)+2.*comp_rew_phys_model(1,2,0,2,1,0,iev)
+2.*comp_rew_phys_model(1,2,1,0,0,0,iev)+2.*comp_rew_phys_model(1,2,1,0,1,0,iev)+2.*comp_rew_phys_model(1,2,1,0,2,0,iev)+2.*comp_rew_phys_model(1,2,1,1,0,0,iev)+2.*comp_rew_phys_model(1,2,1,1,1,0,iev)+2.*comp_rew_phys_model(1,2,1,1,1,1,iev)
+2.*comp_rew_phys_model(1,2,1,1,2,0,iev)+2.*comp_rew_phys_model(1,2,1,2,0,0,iev)+2.*comp_rew_phys_model(1,2,1,2,1,0,iev)+2.*comp_rew_phys_model(1,2,1,2,1,1,iev)+2.*comp_rew_phys_model(1,2,1,2,2,0,iev)+2.*comp_rew_phys_model(1,2,2,0,0,0,iev)
+2.*comp_rew_phys_model(1,2,2,0,1,0,iev)+2.*comp_rew_phys_model(1,2,2,0,2,0,iev)+2.*comp_rew_phys_model(1,2,2,1,0,0,iev)+2.*comp_rew_phys_model(1,2,2,1,1,0,iev)+2.*comp_rew_phys_model(1,2,2,1,1,1,iev)+2.*comp_rew_phys_model(1,2,2,1,1,2,iev)
+2.*comp_rew_phys_model(1,2,2,1,2,0,iev)+2.*comp_rew_phys_model(1,2,2,1,2,1,iev)+2.*comp_rew_phys_model(1,2,2,2,0,0,iev)+2.*comp_rew_phys_model(1,2,2,2,1,0,iev)+2.*comp_rew_phys_model(1,2,2,2,1,1,iev)+2.*comp_rew_phys_model(1,2,2,2,1,2,iev)
+2.*comp_rew_phys_model(1,2,2,2,2,0,iev)+2.*comp_rew_phys_model(1,2,2,2,2,1,iev)+2.*comp_rew_phys_model(2,0,0,0,0,0,iev)+2.*comp_rew_phys_model(2,0,0,1,0,0,iev)+2.*comp_rew_phys_model(2,1,0,0,0,0,iev)+2.*comp_rew_phys_model(2,1,0,0,1,0,iev)
+2.*comp_rew_phys_model(2,1,0,1,0,0,iev)+2.*comp_rew_phys_model(2,1,0,1,1,0,iev)+2.*comp_rew_phys_model(2,1,0,2,0,0,iev)+2.*comp_rew_phys_model(2,1,1,0,0,0,iev)+2.*comp_rew_phys_model(2,1,1,0,1,0,iev)+2.*comp_rew_phys_model(2,1,1,0,2,0,iev)
+2.*comp_rew_phys_model(2,1,1,1,0,0,iev)+2.*comp_rew_phys_model(2,1,1,1,1,0,iev)+2.*comp_rew_phys_model(2,1,1,1,1,1,iev)+2.*comp_rew_phys_model(2,1,1,1,2,0,iev)+2.*comp_rew_phys_model(2,1,1,2,0,0,iev)+2.*comp_rew_phys_model(2,1,1,2,1,0,iev)
+2.*comp_rew_phys_model(2,1,1,2,2,0,iev)+2.*comp_rew_phys_model(2,1,2,0,0,0,iev)+2.*comp_rew_phys_model(2,1,2,0,1,0,iev)+2.*comp_rew_phys_model(2,1,2,0,2,0,iev)+2.*comp_rew_phys_model(2,1,2,1,0,0,iev)+2.*comp_rew_phys_model(2,1,2,1,1,0,iev)
+2.*comp_rew_phys_model(2,1,2,1,1,1,iev)+2.*comp_rew_phys_model(2,1,2,1,1,2,iev)+2.*comp_rew_phys_model(2,1,2,1,2,0,iev)+2.*comp_rew_phys_model(2,1,2,1,2,1,iev)+2.*comp_rew_phys_model(2,1,2,2,0,0,iev)+2.*comp_rew_phys_model(2,1,2,2,1,0,iev)
+2.*comp_rew_phys_model(2,1,2,2,1,1,iev)+2.*comp_rew_phys_model(2,1,2,2,2,0,iev)+2.*comp_rew_phys_model(2,1,2,2,2,1,iev)+2.*comp_rew_phys_model(2,2,0,0,0,0,iev)+2.*comp_rew_phys_model(2,2,0,0,1,0,iev)+2.*comp_rew_phys_model(2,2,0,0,2,0,iev)
+2.*comp_rew_phys_model(2,2,0,1,0,0,iev)+2.*comp_rew_phys_model(2,2,0,1,1,0,iev)+2.*comp_rew_phys_model(2,2,0,1,2,0,iev)+2.*comp_rew_phys_model(2,2,0,2,0,0,iev)+2.*comp_rew_phys_model(2,2,0,2,1,0,iev)+2.*comp_rew_phys_model(2,2,1,0,0,0,iev)
+2.*comp_rew_phys_model(2,2,1,0,1,0,iev)+2.*comp_rew_phys_model(2,2,1,0,2,0,iev)+2.*comp_rew_phys_model(2,2,1,1,0,0,iev)+2.*comp_rew_phys_model(2,2,1,1,1,0,iev)+2.*comp_rew_phys_model(2,2,1,1,1,1,iev)+2.*comp_rew_phys_model(2,2,1,1,2,0,iev)
+2.*comp_rew_phys_model(2,2,1,1,2,1,iev)+2.*comp_rew_phys_model(2,2,1,2,0,0,iev)+2.*comp_rew_phys_model(2,2,1,2,1,0,iev)+2.*comp_rew_phys_model(2,2,1,2,1,1,iev)+2.*comp_rew_phys_model(2,2,1,2,2,0,iev)+2.*comp_rew_phys_model(2,2,2,0,0,0,iev)
+2.*comp_rew_phys_model(2,2,2,0,1,0,iev)+2.*comp_rew_phys_model(2,2,2,0,2,0,iev)+2.*comp_rew_phys_model(2,2,2,1,0,0,iev)+2.*comp_rew_phys_model(2,2,2,1,1,0,iev)+2.*comp_rew_phys_model(2,2,2,1,1,1,iev)+2.*comp_rew_phys_model(2,2,2,1,1,2,iev)
+2.*comp_rew_phys_model(2,2,2,1,2,0,iev)+2.*comp_rew_phys_model(2,2,2,1,2,1,iev)+2.*comp_rew_phys_model(2,2,2,1,2,2,iev)+2.*comp_rew_phys_model(2,2,2,2,0,0,iev)+2.*comp_rew_phys_model(2,2,2,2,1,0,iev)+2.*comp_rew_phys_model(2,2,2,2,1,1,iev)
+2.*comp_rew_phys_model(2,2,2,2,1,2,iev)+2.*comp_rew_phys_model(2,2,2,2,2,0,iev)+2.*comp_rew_phys_model(2,2,2,2,2,1,iev)+2.*comp_rew_phys_model(2,2,3,0,0,0,iev)+2.*comp_rew_phys_model(2,2,3,0,1,0,iev)+2.*comp_rew_phys_model(2,2,3,0,2,0,iev)
+2.*comp_rew_phys_model(2,2,3,1,0,0,iev)+2.*comp_rew_phys_model(2,2,3,1,1,0,iev)+2.*comp_rew_phys_model(2,2,3,1,1,1,iev)+2.*comp_rew_phys_model(2,2,3,1,1,2,iev)+2.*comp_rew_phys_model(2,2,3,1,2,0,iev)+2.*comp_rew_phys_model(2,2,3,1,2,1,iev)
+2.*comp_rew_phys_model(2,2,3,1,2,2,iev)+2.*comp_rew_phys_model(2,2,3,2,0,0,iev)+2.*comp_rew_phys_model(2,2,3,2,1,0,iev)+2.*comp_rew_phys_model(2,2,3,2,1,1,iev)+2.*comp_rew_phys_model(2,2,3,2,1,2,iev)+2.*comp_rew_phys_model(2,2,3,2,2,0,iev)
+2.*comp_rew_phys_model(2,2,3,2,2,1,iev)+2.*comp_rew_phys_model(2,2,3,2,2,2,iev)+2.*comp_rew_phys_model(2,2,4,0,0,0,iev)+2.*comp_rew_phys_model(2,2,4,0,1,0,iev)+2.*comp_rew_phys_model(2,2,4,0,2,0,iev)+2.*comp_rew_phys_model(2,2,4,1,0,0,iev)
+2.*comp_rew_phys_model(2,2,4,1,1,0,iev)+2.*comp_rew_phys_model(2,2,4,1,1,1,iev)+2.*comp_rew_phys_model(2,2,4,1,1,2,iev)+2.*comp_rew_phys_model(2,2,4,1,2,0,iev)+2.*comp_rew_phys_model(2,2,4,1,2,1,iev)+2.*comp_rew_phys_model(2,2,4,1,2,2,iev)
+2.*comp_rew_phys_model(2,2,4,2,0,0,iev)+2.*comp_rew_phys_model(2,2,4,2,1,0,iev)+2.*comp_rew_phys_model(2,2,4,2,1,1,iev)+2.*comp_rew_phys_model(2,2,4,2,1,2,iev)+2.*comp_rew_phys_model(2,2,4,2,2,0,iev)+2.*comp_rew_phys_model(2,2,4,2,2,1,iev)
+2.*comp_rew_phys_model(2,2,4,2,2,2,iev)+2.*comp_rew_phys_model(2,2,4,2,2,3,iev);
}
__global__ void compute_phys_weight(double *MCdata, double *out, double *re_amps, double *dirCP_asyms, double *im_amps, double *weak_phases, double *mixing_params, double *calib_params, double *mass_integrals, int NMCevts) {
int row = threadIdx.x + blockDim.x * blockIdx.x;
if (row >= NMCevts) { return;}
int i0 = row*6;
m1_MCrew[row] = MCdata[0 + i0];
m2_MCrew[row] = MCdata[1 + i0];
cos1_MCrew[row] = MCdata[2 + i0];
cos2_MCrew[row] = MCdata[3 + i0];
phi_MCrew[row] = MCdata[4 + i0];
double weight_invgen = MCdata[5 + i0];
set_buffer_amplitudes(re_amps,dirCP_asyms,im_amps,weak_phases,mixing_params,calib_params);
set_buffer_rew_terms(mass_integrals,row);
double weight_phys = rew_phys_model(row);
out[row] = 1.e4*weight_invgen*weight_phys;
}
}
|
0cc8068dd76c72b7516038d06db7ee61b728c57e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <cstring>
#include <string>
#include <sstream>
#include <thrust/host_vector.h>
#define T_P_B 1024
__global__ void heat2D(float *in, float *out, float *d_candles, int cells, float k, int width) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < cells) {
float top = in[idx];
float bot = in[idx];
float left = in[idx];
float right = in[idx];
if (idx >= width) //not in the top row
top = in[idx - width];
if (idx < (cells - width)) //not in the bottom row
bot = in[idx + width];
if (idx % width != 0) //not in the left column
left = in[idx - 1];
if ((idx + 1) % width != 0) //not in the right column
right = in[idx + 1];
if (d_candles[idx] >= 0)
out[idx] = d_candles[idx];
else
out[idx] = in[idx] + k * (top + bot + right + left - 4 * in[idx]);
}
}
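// NOTE (editor's sketch, not part of the original file): main() below also launches
// a kernel named heat3D that is missing from this excerpt. The definition here is an
// assumption modelled on the heat2D stencil above (same clamped-boundary treatment,
// extended with front/back neighbours one z-slice away); the original heat3D may differ.
__global__ void heat3D(float *in, float *out, float *d_candles, int cells, float k, int width, int height) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < cells) {
int slice = width * height; // number of cells in one z-slice
float top = in[idx];
float bot = in[idx];
float left = in[idx];
float right = in[idx];
float front = in[idx];
float back = in[idx];
if ((idx % slice) >= width) //not in the first row of its slice
top = in[idx - width];
if ((idx % slice) < (slice - width)) //not in the last row of its slice
bot = in[idx + width];
if (idx % width != 0) //not in the left column
left = in[idx - 1];
if ((idx + 1) % width != 0) //not in the right column
right = in[idx + 1];
if (idx >= slice) //not in the first z-slice
front = in[idx - slice];
if (idx < (cells - slice)) //not in the last z-slice
back = in[idx + slice];
if (d_candles[idx] >= 0)
out[idx] = d_candles[idx];
else
out[idx] = in[idx] + k * (top + bot + left + right + front + back - 6 * in[idx]);
}
}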
int main(int argc, char *argv[]) {
if (argc != 4)
{
std::cout << "Incorrect usage" << std::endl;
return -1;
}
// declare vars
InputImage imageObj(argv[2]);
int length, totalLength;
Complex *image = nullptr;
// values from input file
length = imageObj.get_width();
totalLength = length * length;
image = imageObj.get_image_data();
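// NOTE: mode, width, height, depth, starting_temp, timestep, k, static_heat and
// info_size are used below but never defined in this excerpt; they are presumably
// read from the input file named on the command line, and that parsing step is not
// shown here. Likewise, InputImage and Complex above are used without a declaration.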
//create stationary heating source grid
int cells = width * height * depth;
float candles[cells]; // VLA (compiler extension); a VLA cannot take an initializer, every entry is set to -1 in the loop below
for(int i = 0; i < cells; ++i) {
candles[i] = -1;
}
//input candle information
int location_x, location_y, candle_width, candle_height;
int location_z = 0, candle_depth = 1;
float fixed_temperature;
int ind;
for (int i = 0; i < static_heat.size()/info_size; i++) {
ind = i * info_size;
location_x = static_heat[0 + ind];
location_y = static_heat[1 + ind];
if (!mode) {
candle_width = static_heat[2 + ind];
candle_height = static_heat[3 + ind];
fixed_temperature = static_heat[4 + ind];
} else {
location_z = static_heat[2 + ind];
candle_width = static_heat[3 + ind];
candle_height = static_heat[4 + ind];
candle_depth = static_heat[5 + ind];
fixed_temperature = static_heat[6 + ind];
}
for (int z = location_z; z < location_z + candle_depth; z++) {
for (int y = location_y; y < location_y + candle_height; y++) {
for (int x = location_x; x < location_x + candle_width; x++) {
candles[x + y * width + z * width * height] = fixed_temperature;
}
}
}
}
//cuda code
float *d_gridA, *d_gridB, *d_candles;
hipMalloc((void**)&d_gridA, cells * sizeof(float));
hipMalloc((void**)&d_gridB, cells * sizeof(float));
hipMalloc((void**)&d_candles, cells * sizeof(float));
float gridA[cells], gridB[cells];
//initialize starting grid
for(int i = 0; i < cells; ++i)
gridA[i] = starting_temp;
//copy candles
for (int i = 0; i < cells; i++) {
if (candles[i] >= 0)
gridA[i] = candles[i];
}
hipMemcpy(d_gridA, gridA, cells * sizeof(float), hipMemcpyHostToDevice);
hipMemcpy(d_candles, candles, cells * sizeof(float), hipMemcpyHostToDevice);
float *in, *out;
int i;
for (i = 0; i < timestep; i++) {
if (i % 2) {
in = d_gridB;
out = d_gridA;
} else {
in = d_gridA;
out = d_gridB;
}
if (mode == 0)
hipLaunchKernelGGL(( heat2D), dim3((cells + T_P_B-1) / T_P_B), dim3(T_P_B), 0, 0, in, out, d_candles, cells, k, width);
else
hipLaunchKernelGGL(( heat3D), dim3((cells + T_P_B-1) / T_P_B), dim3(T_P_B), 0, 0, in, out, d_candles, cells, k, width, height);
}
//set up answer
float *answer;
if (i % 2) {
hipMemcpy(gridB, d_gridB, cells*sizeof(float), hipMemcpyDeviceToHost);
answer = gridB;
} else {
hipMemcpy(gridA, d_gridA, cells*sizeof(float), hipMemcpyDeviceToHost);
answer = gridA;
}
hipFree(d_gridA);
hipFree(d_gridB);
hipFree(d_candles);
//print answers
std::ofstream myfile;
myfile.open("heatOutput.csv");
for(int i = 0; i < cells; ++i) {
myfile << answer[i];
if ((((i + 1) % width) == 0) && i > 0 && i < (cells - 1))
myfile << std::endl;
else
myfile << ",";
if ((((i + 1) % (width * height)) == 0) && i < (cells - 1))
myfile << std::endl;
}
myfile.close();
return 0;
} | 0cc8068dd76c72b7516038d06db7ee61b728c57e.cu | #include <stdio.h>
#include <algorithm>
#include <iostream>
#include <fstream>
#include <cstring>
#include <string>
#include <sstream>
#include <thrust/host_vector.h>
#define T_P_B 1024
__global__ void heat2D(float *in, float *out, float *d_candles, int cells, float k, int width) {
int idx = threadIdx.x + blockIdx.x * blockDim.x;
if(idx < cells) {
float top = in[idx];
float bot = in[idx];
float left = in[idx];
float right = in[idx];
if (idx >= width) //not in the top row
top = in[idx - width];
if (idx < (cells - width)) //not in the bottom row
bot = in[idx + width];
if (idx % width != 0) //not in the left column
left = in[idx - 1];
if ((idx + 1) % width != 0) //not in the right column
right = in[idx + 1];
if (d_candles[idx] >= 0)
out[idx] = d_candles[idx];
else
out[idx] = in[idx] + k * (top + bot + right + left - 4 * in[idx]);
}
}
int main(int argc, char *argv[]) {
if (argc != 4)
{
std::cout << "Incorrect usage" << std::endl;
return -1;
}
// declare vars
InputImage imageObj(argv[2]);
int length, totalLength;
Complex *image = nullptr;
// values from input file
length = imageObj.get_width();
totalLength = length * length;
image = imageObj.get_image_data();
//create stationary heating source grid
int cells = width * height * depth;
float candles[cells]; // VLA (compiler extension); a VLA cannot take an initializer, every entry is set to -1 in the loop below
for(int i = 0; i < cells; ++i) {
candles[i] = -1;
}
//input candle information
int location_x, location_y, candle_width, candle_height;
int location_z = 0, candle_depth = 1;
float fixed_temperature;
int ind;
for (int i = 0; i < static_heat.size()/info_size; i++) {
ind = i * info_size;
location_x = static_heat[0 + ind];
location_y = static_heat[1 + ind];
if (!mode) {
candle_width = static_heat[2 + ind];
candle_height = static_heat[3 + ind];
fixed_temperature = static_heat[4 + ind];
} else {
location_z = static_heat[2 + ind];
candle_width = static_heat[3 + ind];
candle_height = static_heat[4 + ind];
candle_depth = static_heat[5 + ind];
fixed_temperature = static_heat[6 + ind];
}
for (int z = location_z; z < location_z + candle_depth; z++) {
for (int y = location_y; y < location_y + candle_height; y++) {
for (int x = location_x; x < location_x + candle_width; x++) {
candles[x + y * width + z * width * height] = fixed_temperature;
}
}
}
}
//cuda code
float *d_gridA, *d_gridB, *d_candles;
cudaMalloc((void**)&d_gridA, cells * sizeof(float));
cudaMalloc((void**)&d_gridB, cells * sizeof(float));
cudaMalloc((void**)&d_candles, cells * sizeof(float));
float gridA[cells], gridB[cells];
//initialize starting grid
for(int i = 0; i < cells; ++i)
gridA[i] = starting_temp;
//copy candles
for (int i = 0; i < cells; i++) {
if (candles[i] >= 0)
gridA[i] = candles[i];
}
cudaMemcpy(d_gridA, gridA, cells * sizeof(float), cudaMemcpyHostToDevice);
cudaMemcpy(d_candles, candles, cells * sizeof(float), cudaMemcpyHostToDevice);
float *in, *out;
int i;
for (i = 0; i < timestep; i++) {
if (i % 2) {
in = d_gridB;
out = d_gridA;
} else {
in = d_gridA;
out = d_gridB;
}
if (mode == 0)
heat2D<<<(cells + T_P_B-1) / T_P_B, T_P_B>>>(in, out, d_candles, cells, k, width);
else
heat3D<<<(cells + T_P_B-1) / T_P_B, T_P_B>>>(in, out, d_candles, cells, k, width, height);
}
//set up answer
float *answer;
if (i % 2) {
cudaMemcpy(gridB, d_gridB, cells*sizeof(float), cudaMemcpyDeviceToHost);
answer = gridB;
} else {
cudaMemcpy(gridA, d_gridA, cells*sizeof(float), cudaMemcpyDeviceToHost);
answer = gridA;
}
cudaFree(d_gridA);
cudaFree(d_gridB);
cudaFree(d_candles);
//print answers
std::ofstream myfile;
myfile.open("heatOutput.csv");
for(int i = 0; i < cells; ++i) {
myfile << answer[i];
if ((((i + 1) % width) == 0) && i > 0 && i < (cells - 1))
myfile << std::endl;
else
myfile << ",";
if ((((i + 1) % (width * height)) == 0) && i < (cells - 1))
myfile << std::endl;
}
myfile.close();
return 0;
} |
1792f93fe9532c27214749a404eaf4332d859172.hip | // !!! This is a file automatically generated by hipify!!!
/***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*############################################################################*/
// includes, system
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <float.h>
// includes, project
#include "main.h"
#include "lbm.h"
#ifndef __MCUDA__
#include <hip/hip_runtime.h>
#else
#include <mcuda.h>
#endif
#define DFL1 (1.0f/ 3.0f)
#define DFL2 (1.0f/18.0f)
#define DFL3 (1.0f/36.0f)
// includes, kernels
#include "lbm_kernel.cu"
#define REAL_MARGIN (CALC_INDEX(0, 0, 2, 0) - CALC_INDEX(0,0,0,0))
#define TOTAL_MARGIN (2*PADDED_X*PADDED_Y*N_CELL_ENTRIES)
/******************************************************************************/
void CUDA_LBM_performStreamCollide( LBM_Grid srcGrid, LBM_Grid dstGrid ) {
dim3 dimBlock, dimGrid;
dimBlock.x = SIZE_X;
dimGrid.x = SIZE_Y;
dimGrid.y = SIZE_Z;
dimBlock.y = dimBlock.z = dimGrid.z = 1;
hipLaunchKernelGGL(( performStreamCollide_kernel), dim3(dimGrid), dim3(dimBlock), 0, 0, srcGrid, dstGrid);
CUDA_ERRCK;
}
/*############################################################################*/
void LBM_allocateGrid( float** ptr ) {
const size_t size = TOTAL_PADDED_CELLS*N_CELL_ENTRIES*sizeof( float ) + 2*TOTAL_MARGIN*sizeof( float );
*ptr = (float*)malloc( size );
if( ! *ptr ) {
printf( "LBM_allocateGrid: could not allocate %.1f MByte\n",
size / (1024.0*1024.0) );
exit( 1 );
}
memset( *ptr, 0, size );
printf( "LBM_allocateGrid: allocated %.1f MByte\n",
size / (1024.0*1024.0) );
*ptr += REAL_MARGIN;
}
/******************************************************************************/
void CUDA_LBM_allocateGrid( float** ptr ) {
const size_t size = TOTAL_PADDED_CELLS*N_CELL_ENTRIES*sizeof( float ) + 2*TOTAL_MARGIN*sizeof( float );
hipMalloc((void**)ptr, size);
CUDA_ERRCK;
*ptr += REAL_MARGIN;
}
/*############################################################################*/
void LBM_freeGrid( float** ptr ) {
free( *ptr-REAL_MARGIN );
*ptr = NULL;
}
/******************************************************************************/
void CUDA_LBM_freeGrid( float** ptr ) {
hipFree( *ptr-REAL_MARGIN );
*ptr = NULL;
}
/*############################################################################*/
void LBM_initializeGrid( LBM_Grid grid ) {
SWEEP_VAR
SWEEP_START( 0, 0, 0, 0, 0, SIZE_Z )
SRC_C( grid ) = DFL1;
SRC_N( grid ) = DFL2;
SRC_S( grid ) = DFL2;
SRC_E( grid ) = DFL2;
SRC_W( grid ) = DFL2;
SRC_T( grid ) = DFL2;
SRC_B( grid ) = DFL2;
SRC_NE( grid ) = DFL3;
SRC_NW( grid ) = DFL3;
SRC_SE( grid ) = DFL3;
SRC_SW( grid ) = DFL3;
SRC_NT( grid ) = DFL3;
SRC_NB( grid ) = DFL3;
SRC_ST( grid ) = DFL3;
SRC_SB( grid ) = DFL3;
SRC_ET( grid ) = DFL3;
SRC_EB( grid ) = DFL3;
SRC_WT( grid ) = DFL3;
SRC_WB( grid ) = DFL3;
CLEAR_ALL_FLAGS_SWEEP( grid );
SWEEP_END
}
/******************************************************************************/
void CUDA_LBM_initializeGrid( float** d_grid, float** h_grid ) {
const size_t size = TOTAL_PADDED_CELLS*N_CELL_ENTRIES*sizeof( float ) + 2*TOTAL_MARGIN*sizeof( float );
hipMemcpy(*d_grid - REAL_MARGIN, *h_grid - REAL_MARGIN, size, hipMemcpyHostToDevice);
CUDA_ERRCK;
}
void CUDA_LBM_getDeviceGrid( float** d_grid, float** h_grid ) {
const size_t size = TOTAL_PADDED_CELLS*N_CELL_ENTRIES*sizeof( float ) + 2*TOTAL_MARGIN*sizeof( float );
hipDeviceSynchronize();
CUDA_ERRCK;
hipMemcpy(*h_grid - REAL_MARGIN, *d_grid - REAL_MARGIN, size, hipMemcpyDeviceToHost);
CUDA_ERRCK;
}
/*############################################################################*/
void LBM_swapGrids( LBM_GridPtr grid1, LBM_GridPtr grid2 ) {
LBM_Grid aux = *grid1;
*grid1 = *grid2;
*grid2 = aux;
}
/*############################################################################*/
void LBM_loadObstacleFile( LBM_Grid grid, const char* filename ) {
int x, y, z;
FILE* file = fopen( filename, "rb" );
for( z = 0; z < SIZE_Z; z++ ) {
for( y = 0; y < SIZE_Y; y++ ) {
for( x = 0; x < SIZE_X; x++ ) {
if( fgetc( file ) != '.' ) SET_FLAG( grid, x, y, z, OBSTACLE );
}
fgetc( file );
}
fgetc( file );
}
fclose( file );
}
/*############################################################################*/
void LBM_initializeSpecialCellsForLDC( LBM_Grid grid ) {
int x, y, z;
for( z = -2; z < SIZE_Z+2; z++ ) {
for( y = 0; y < SIZE_Y; y++ ) {
for( x = 0; x < SIZE_X; x++ ) {
if( x == 0 || x == SIZE_X-1 ||
y == 0 || y == SIZE_Y-1 ||
z == 0 || z == SIZE_Z-1 ) {
SET_FLAG( grid, x, y, z, OBSTACLE );
}
else {
if( (z == 1 || z == SIZE_Z-2) &&
x > 1 && x < SIZE_X-2 &&
y > 1 && y < SIZE_Y-2 ) {
SET_FLAG( grid, x, y, z, ACCEL );
}
}
}
}
}
}
/*############################################################################*/
void LBM_showGridStatistics( LBM_Grid grid ) {
int nObstacleCells = 0,
nAccelCells = 0,
nFluidCells = 0;
float ux, uy, uz;
float minU2 = 1e+30, maxU2 = -1e+30, u2;
float minRho = 1e+30, maxRho = -1e+30, rho;
float mass = 0;
SWEEP_VAR
SWEEP_START( 0, 0, 0, 0, 0, SIZE_Z )
rho = LOCAL( grid, C ) + LOCAL( grid, N )
+ LOCAL( grid, S ) + LOCAL( grid, E )
+ LOCAL( grid, W ) + LOCAL( grid, T )
+ LOCAL( grid, B ) + LOCAL( grid, NE )
+ LOCAL( grid, NW ) + LOCAL( grid, SE )
+ LOCAL( grid, SW ) + LOCAL( grid, NT )
+ LOCAL( grid, NB ) + LOCAL( grid, ST )
+ LOCAL( grid, SB ) + LOCAL( grid, ET )
+ LOCAL( grid, EB ) + LOCAL( grid, WT )
+ LOCAL( grid, WB );
if( rho < minRho ) minRho = rho;
if( rho > maxRho ) maxRho = rho;
mass += rho;
if( TEST_FLAG_SWEEP( grid, OBSTACLE )) {
nObstacleCells++;
}
else {
if( TEST_FLAG_SWEEP( grid, ACCEL ))
nAccelCells++;
else
nFluidCells++;
ux = + LOCAL( grid, E ) - LOCAL( grid, W )
+ LOCAL( grid, NE ) - LOCAL( grid, NW )
+ LOCAL( grid, SE ) - LOCAL( grid, SW )
+ LOCAL( grid, ET ) + LOCAL( grid, EB )
- LOCAL( grid, WT ) - LOCAL( grid, WB );
uy = + LOCAL( grid, N ) - LOCAL( grid, S )
+ LOCAL( grid, NE ) + LOCAL( grid, NW )
- LOCAL( grid, SE ) - LOCAL( grid, SW )
+ LOCAL( grid, NT ) + LOCAL( grid, NB )
- LOCAL( grid, ST ) - LOCAL( grid, SB );
uz = + LOCAL( grid, T ) - LOCAL( grid, B )
+ LOCAL( grid, NT ) - LOCAL( grid, NB )
+ LOCAL( grid, ST ) - LOCAL( grid, SB )
+ LOCAL( grid, ET ) - LOCAL( grid, EB )
+ LOCAL( grid, WT ) - LOCAL( grid, WB );
u2 = (ux*ux + uy*uy + uz*uz) / (rho*rho);
if( u2 < minU2 ) minU2 = u2;
if( u2 > maxU2 ) maxU2 = u2;
}
SWEEP_END
printf( "LBM_showGridStatistics:\n"
"\tnObstacleCells: %7i nAccelCells: %7i nFluidCells: %7i\n"
"\tminRho: %8.4f maxRho: %8.4f mass: %e\n"
"\tminU: %e maxU: %e\n\n",
nObstacleCells, nAccelCells, nFluidCells,
minRho, maxRho, mass,
sqrt( minU2 ), sqrt( maxU2 ) );
}
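/*############################################################################*/
/* Editor's illustrative sketch, not part of the original Parboil sources: the
driving main() lives in a separate file, so the call sequence, the function
name LBM_run_sketch, and the (float**) casts below are assumptions showing
how the helpers above are typically chained (allocate, initialize, ping-pong
the two device grids each time step, copy back, report statistics, free).
LBM_Grid/LBM_GridPtr come from lbm.h and are assumed to be pointer typedefs,
as the casts used in the functions above suggest. */
void LBM_run_sketch(int nTimeSteps) {
LBM_Grid srcHost, srcDev, dstDev;
LBM_allocateGrid( (float**) &srcHost );
CUDA_LBM_allocateGrid( (float**) &srcDev );
CUDA_LBM_allocateGrid( (float**) &dstDev );
LBM_initializeGrid( srcHost );
LBM_initializeSpecialCellsForLDC( srcHost );
CUDA_LBM_initializeGrid( (float**) &srcDev, (float**) &srcHost );
for( int t = 0; t < nTimeSteps; t++ ) {
CUDA_LBM_performStreamCollide( srcDev, dstDev );
LBM_swapGrids( &srcDev, &dstDev ); /* swap the device grids for the next step */
}
CUDA_LBM_getDeviceGrid( (float**) &srcDev, (float**) &srcHost );
LBM_showGridStatistics( srcHost );
CUDA_LBM_freeGrid( (float**) &srcDev );
CUDA_LBM_freeGrid( (float**) &dstDev );
LBM_freeGrid( (float**) &srcHost );
}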
/*############################################################################*/
static void storeValue( FILE* file, OUTPUT_PRECISION* v ) {
const int litteBigEndianTest = 1;
if( (*((unsigned char*) &litteBigEndianTest)) == 0 ) { /* big endian */
const char* vPtr = (char*) v;
char buffer[sizeof( OUTPUT_PRECISION )];
int i;
for (i = 0; i < sizeof( OUTPUT_PRECISION ); i++)
buffer[i] = vPtr[sizeof( OUTPUT_PRECISION ) - i - 1];
fwrite( buffer, sizeof( OUTPUT_PRECISION ), 1, file );
}
else { /* little endian */
fwrite( v, sizeof( OUTPUT_PRECISION ), 1, file );
}
}
/*############################################################################*/
void LBM_storeVelocityField( LBM_Grid grid, const char* filename,
const int binary ) {
OUTPUT_PRECISION rho, ux, uy, uz;
FILE* file = fopen( filename, (binary ? "wb" : "w") );
SWEEP_VAR
SWEEP_START(0,0,0,SIZE_X,SIZE_Y,SIZE_Z)
rho = + SRC_C( grid ) + SRC_N( grid )
+ SRC_S( grid ) + SRC_E( grid )
+ SRC_W( grid ) + SRC_T( grid )
+ SRC_B( grid ) + SRC_NE( grid )
+ SRC_NW( grid ) + SRC_SE( grid )
+ SRC_SW( grid ) + SRC_NT( grid )
+ SRC_NB( grid ) + SRC_ST( grid )
+ SRC_SB( grid ) + SRC_ET( grid )
+ SRC_EB( grid ) + SRC_WT( grid )
+ SRC_WB( grid );
ux = + SRC_E( grid ) - SRC_W( grid )
+ SRC_NE( grid ) - SRC_NW( grid )
+ SRC_SE( grid ) - SRC_SW( grid )
+ SRC_ET( grid ) + SRC_EB( grid )
- SRC_WT( grid ) - SRC_WB( grid );
uy = + SRC_N( grid ) - SRC_S( grid )
+ SRC_NE( grid ) + SRC_NW( grid )
- SRC_SE( grid ) - SRC_SW( grid )
+ SRC_NT( grid ) + SRC_NB( grid )
- SRC_ST( grid ) - SRC_SB( grid );
uz = + SRC_T( grid ) - SRC_B( grid )
+ SRC_NT( grid ) - SRC_NB( grid )
+ SRC_ST( grid ) - SRC_SB( grid )
+ SRC_ET( grid ) - SRC_EB( grid )
+ SRC_WT( grid ) - SRC_WB( grid );
ux /= rho;
uy /= rho;
uz /= rho;
if( binary ) {
/*
fwrite( &ux, sizeof( ux ), 1, file );
fwrite( &uy, sizeof( uy ), 1, file );
fwrite( &uz, sizeof( uz ), 1, file );
*/
storeValue( file, &ux );
storeValue( file, &uy );
storeValue( file, &uz );
} else
fprintf( file, "%e %e %e\n", ux, uy, uz );
SWEEP_END;
fclose( file );
}
| 1792f93fe9532c27214749a404eaf4332d859172.cu | /***************************************************************************
*cr
*cr (C) Copyright 2010 The Board of Trustees of the
*cr University of Illinois
*cr All Rights Reserved
*cr
***************************************************************************/
/*############################################################################*/
// includes, system
#include <math.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <float.h>
// includes, project
#include "main.h"
#include "lbm.h"
#ifndef __MCUDA__
#include <cuda.h>
#else
#include <mcuda.h>
#endif
#define DFL1 (1.0f/ 3.0f)
#define DFL2 (1.0f/18.0f)
#define DFL3 (1.0f/36.0f)
// includes, kernels
#include "lbm_kernel.cu"
#define REAL_MARGIN (CALC_INDEX(0, 0, 2, 0) - CALC_INDEX(0,0,0,0))
#define TOTAL_MARGIN (2*PADDED_X*PADDED_Y*N_CELL_ENTRIES)
/******************************************************************************/
void CUDA_LBM_performStreamCollide( LBM_Grid srcGrid, LBM_Grid dstGrid ) {
dim3 dimBlock, dimGrid;
dimBlock.x = SIZE_X;
dimGrid.x = SIZE_Y;
dimGrid.y = SIZE_Z;
dimBlock.y = dimBlock.z = dimGrid.z = 1;
performStreamCollide_kernel<<<dimGrid, dimBlock>>>(srcGrid, dstGrid);
CUDA_ERRCK;
}
/*############################################################################*/
void LBM_allocateGrid( float** ptr ) {
const size_t size = TOTAL_PADDED_CELLS*N_CELL_ENTRIES*sizeof( float ) + 2*TOTAL_MARGIN*sizeof( float );
*ptr = (float*)malloc( size );
if( ! *ptr ) {
printf( "LBM_allocateGrid: could not allocate %.1f MByte\n",
size / (1024.0*1024.0) );
exit( 1 );
}
memset( *ptr, 0, size );
printf( "LBM_allocateGrid: allocated %.1f MByte\n",
size / (1024.0*1024.0) );
*ptr += REAL_MARGIN;
}
/******************************************************************************/
void CUDA_LBM_allocateGrid( float** ptr ) {
const size_t size = TOTAL_PADDED_CELLS*N_CELL_ENTRIES*sizeof( float ) + 2*TOTAL_MARGIN*sizeof( float );
cudaMalloc((void**)ptr, size);
CUDA_ERRCK;
*ptr += REAL_MARGIN;
}
/*############################################################################*/
void LBM_freeGrid( float** ptr ) {
free( *ptr-REAL_MARGIN );
*ptr = NULL;
}
/******************************************************************************/
void CUDA_LBM_freeGrid( float** ptr ) {
cudaFree( *ptr-REAL_MARGIN );
*ptr = NULL;
}
/*############################################################################*/
void LBM_initializeGrid( LBM_Grid grid ) {
SWEEP_VAR
SWEEP_START( 0, 0, 0, 0, 0, SIZE_Z )
SRC_C( grid ) = DFL1;
SRC_N( grid ) = DFL2;
SRC_S( grid ) = DFL2;
SRC_E( grid ) = DFL2;
SRC_W( grid ) = DFL2;
SRC_T( grid ) = DFL2;
SRC_B( grid ) = DFL2;
SRC_NE( grid ) = DFL3;
SRC_NW( grid ) = DFL3;
SRC_SE( grid ) = DFL3;
SRC_SW( grid ) = DFL3;
SRC_NT( grid ) = DFL3;
SRC_NB( grid ) = DFL3;
SRC_ST( grid ) = DFL3;
SRC_SB( grid ) = DFL3;
SRC_ET( grid ) = DFL3;
SRC_EB( grid ) = DFL3;
SRC_WT( grid ) = DFL3;
SRC_WB( grid ) = DFL3;
CLEAR_ALL_FLAGS_SWEEP( grid );
SWEEP_END
}
/******************************************************************************/
void CUDA_LBM_initializeGrid( float** d_grid, float** h_grid ) {
const size_t size = TOTAL_PADDED_CELLS*N_CELL_ENTRIES*sizeof( float ) + 2*TOTAL_MARGIN*sizeof( float );
cudaMemcpy(*d_grid - REAL_MARGIN, *h_grid - REAL_MARGIN, size, cudaMemcpyHostToDevice);
CUDA_ERRCK;
}
void CUDA_LBM_getDeviceGrid( float** d_grid, float** h_grid ) {
const size_t size = TOTAL_PADDED_CELLS*N_CELL_ENTRIES*sizeof( float ) + 2*TOTAL_MARGIN*sizeof( float );
cudaThreadSynchronize();
CUDA_ERRCK;
cudaMemcpy(*h_grid - REAL_MARGIN, *d_grid - REAL_MARGIN, size, cudaMemcpyDeviceToHost);
CUDA_ERRCK;
}
/*############################################################################*/
void LBM_swapGrids( LBM_GridPtr grid1, LBM_GridPtr grid2 ) {
LBM_Grid aux = *grid1;
*grid1 = *grid2;
*grid2 = aux;
}
/*############################################################################*/
void LBM_loadObstacleFile( LBM_Grid grid, const char* filename ) {
int x, y, z;
FILE* file = fopen( filename, "rb" );
for( z = 0; z < SIZE_Z; z++ ) {
for( y = 0; y < SIZE_Y; y++ ) {
for( x = 0; x < SIZE_X; x++ ) {
if( fgetc( file ) != '.' ) SET_FLAG( grid, x, y, z, OBSTACLE );
}
fgetc( file );
}
fgetc( file );
}
fclose( file );
}
/*############################################################################*/
void LBM_initializeSpecialCellsForLDC( LBM_Grid grid ) {
int x, y, z;
for( z = -2; z < SIZE_Z+2; z++ ) {
for( y = 0; y < SIZE_Y; y++ ) {
for( x = 0; x < SIZE_X; x++ ) {
if( x == 0 || x == SIZE_X-1 ||
y == 0 || y == SIZE_Y-1 ||
z == 0 || z == SIZE_Z-1 ) {
SET_FLAG( grid, x, y, z, OBSTACLE );
}
else {
if( (z == 1 || z == SIZE_Z-2) &&
x > 1 && x < SIZE_X-2 &&
y > 1 && y < SIZE_Y-2 ) {
SET_FLAG( grid, x, y, z, ACCEL );
}
}
}
}
}
}
/*############################################################################*/
void LBM_showGridStatistics( LBM_Grid grid ) {
int nObstacleCells = 0,
nAccelCells = 0,
nFluidCells = 0;
float ux, uy, uz;
float minU2 = 1e+30, maxU2 = -1e+30, u2;
float minRho = 1e+30, maxRho = -1e+30, rho;
float mass = 0;
SWEEP_VAR
SWEEP_START( 0, 0, 0, 0, 0, SIZE_Z )
rho = LOCAL( grid, C ) + LOCAL( grid, N )
+ LOCAL( grid, S ) + LOCAL( grid, E )
+ LOCAL( grid, W ) + LOCAL( grid, T )
+ LOCAL( grid, B ) + LOCAL( grid, NE )
+ LOCAL( grid, NW ) + LOCAL( grid, SE )
+ LOCAL( grid, SW ) + LOCAL( grid, NT )
+ LOCAL( grid, NB ) + LOCAL( grid, ST )
+ LOCAL( grid, SB ) + LOCAL( grid, ET )
+ LOCAL( grid, EB ) + LOCAL( grid, WT )
+ LOCAL( grid, WB );
if( rho < minRho ) minRho = rho;
if( rho > maxRho ) maxRho = rho;
mass += rho;
if( TEST_FLAG_SWEEP( grid, OBSTACLE )) {
nObstacleCells++;
}
else {
if( TEST_FLAG_SWEEP( grid, ACCEL ))
nAccelCells++;
else
nFluidCells++;
ux = + LOCAL( grid, E ) - LOCAL( grid, W )
+ LOCAL( grid, NE ) - LOCAL( grid, NW )
+ LOCAL( grid, SE ) - LOCAL( grid, SW )
+ LOCAL( grid, ET ) + LOCAL( grid, EB )
- LOCAL( grid, WT ) - LOCAL( grid, WB );
uy = + LOCAL( grid, N ) - LOCAL( grid, S )
+ LOCAL( grid, NE ) + LOCAL( grid, NW )
- LOCAL( grid, SE ) - LOCAL( grid, SW )
+ LOCAL( grid, NT ) + LOCAL( grid, NB )
- LOCAL( grid, ST ) - LOCAL( grid, SB );
uz = + LOCAL( grid, T ) - LOCAL( grid, B )
+ LOCAL( grid, NT ) - LOCAL( grid, NB )
+ LOCAL( grid, ST ) - LOCAL( grid, SB )
+ LOCAL( grid, ET ) - LOCAL( grid, EB )
+ LOCAL( grid, WT ) - LOCAL( grid, WB );
u2 = (ux*ux + uy*uy + uz*uz) / (rho*rho);
if( u2 < minU2 ) minU2 = u2;
if( u2 > maxU2 ) maxU2 = u2;
}
SWEEP_END
printf( "LBM_showGridStatistics:\n"
"\tnObstacleCells: %7i nAccelCells: %7i nFluidCells: %7i\n"
"\tminRho: %8.4f maxRho: %8.4f mass: %e\n"
"\tminU: %e maxU: %e\n\n",
nObstacleCells, nAccelCells, nFluidCells,
minRho, maxRho, mass,
sqrt( minU2 ), sqrt( maxU2 ) );
}
/*############################################################################*/
static void storeValue( FILE* file, OUTPUT_PRECISION* v ) {
const int litteBigEndianTest = 1;
if( (*((unsigned char*) &litteBigEndianTest)) == 0 ) { /* big endian */
const char* vPtr = (char*) v;
char buffer[sizeof( OUTPUT_PRECISION )];
int i;
for (i = 0; i < sizeof( OUTPUT_PRECISION ); i++)
buffer[i] = vPtr[sizeof( OUTPUT_PRECISION ) - i - 1];
fwrite( buffer, sizeof( OUTPUT_PRECISION ), 1, file );
}
else { /* little endian */
fwrite( v, sizeof( OUTPUT_PRECISION ), 1, file );
}
}
/*############################################################################*/
void LBM_storeVelocityField( LBM_Grid grid, const char* filename,
const int binary ) {
OUTPUT_PRECISION rho, ux, uy, uz;
FILE* file = fopen( filename, (binary ? "wb" : "w") );
SWEEP_VAR
SWEEP_START(0,0,0,SIZE_X,SIZE_Y,SIZE_Z)
rho = + SRC_C( grid ) + SRC_N( grid )
+ SRC_S( grid ) + SRC_E( grid )
+ SRC_W( grid ) + SRC_T( grid )
+ SRC_B( grid ) + SRC_NE( grid )
+ SRC_NW( grid ) + SRC_SE( grid )
+ SRC_SW( grid ) + SRC_NT( grid )
+ SRC_NB( grid ) + SRC_ST( grid )
+ SRC_SB( grid ) + SRC_ET( grid )
+ SRC_EB( grid ) + SRC_WT( grid )
+ SRC_WB( grid );
ux = + SRC_E( grid ) - SRC_W( grid )
+ SRC_NE( grid ) - SRC_NW( grid )
+ SRC_SE( grid ) - SRC_SW( grid )
+ SRC_ET( grid ) + SRC_EB( grid )
- SRC_WT( grid ) - SRC_WB( grid );
uy = + SRC_N( grid ) - SRC_S( grid )
+ SRC_NE( grid ) + SRC_NW( grid )
- SRC_SE( grid ) - SRC_SW( grid )
+ SRC_NT( grid ) + SRC_NB( grid )
- SRC_ST( grid ) - SRC_SB( grid );
uz = + SRC_T( grid ) - SRC_B( grid )
+ SRC_NT( grid ) - SRC_NB( grid )
+ SRC_ST( grid ) - SRC_SB( grid )
+ SRC_ET( grid ) - SRC_EB( grid )
+ SRC_WT( grid ) - SRC_WB( grid );
ux /= rho;
uy /= rho;
uz /= rho;
if( binary ) {
/*
fwrite( &ux, sizeof( ux ), 1, file );
fwrite( &uy, sizeof( uy ), 1, file );
fwrite( &uz, sizeof( uz ), 1, file );
*/
storeValue( file, &ux );
storeValue( file, &uy );
storeValue( file, &uz );
} else
fprintf( file, "%e %e %e\n", ux, uy, uz );
SWEEP_END;
fclose( file );
}
|
602f263652dc658f5bf96b7c3aeebf27b6a43042.hip | // !!! This is a file automatically generated by hipify!!!
#include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/AccumulateType.h>
#include <ATen/hip/Exceptions.h>
#include <ATen/hip/HIPContext.h>
#include <cmath>
#include <limits>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
namespace at {
namespace native {
template<typename T, typename accT = T>
struct LinspaceOp {
__host__ __device__ LinspaceOp(accT start, accT step):
start_(start), step_(step) { }
__device__ __forceinline__ T operator()(ptrdiff_t index) {
accT increment = step_ * static_cast<accT>(index);
accT value = start_ + increment;
return static_cast<T>(value);
}
const accT start_, step_;
};
template<typename T, typename accT = T>
struct LogspaceOp {
__host__ __device__ LogspaceOp(accT start, accT step, accT base):
start_(start), step_(step), base_(base) { }
__device__ __forceinline__ T operator()(ptrdiff_t index) {
accT increment = step_ * static_cast<accT>(index);
accT value = ::pow(base_, start_ + increment);
return static_cast<T>(value);
}
const accT start_, step_, base_;
};
Tensor& linspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t steps) {
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (result.numel() != steps) {
result.resize_({steps});
}
Tensor r = result.is_contiguous() ? result : result.contiguous();
if (steps == 0) {
// skip
} else if (steps == 1) {
r.fill_(start);
} else {
AT_DISPATCH_FLOATING_TYPES(r.scalar_type(), "linspace_cuda", [&]() {
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
LinspaceOp<scalar_t> linspace_method(scalar_start, step);
thrust::device_ptr<scalar_t> data_(r.data_ptr<scalar_t>());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto policy = thrust::hip::par.on(stream);
thrust::tabulate(policy, data_, data_ + steps, linspace_method);
});
}
if (!result.is_contiguous()) {
result.copy_(r);
}
AT_CUDA_CHECK(hipGetLastError());
return result;
}
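// Worked example (added for clarity, not in the upstream source): for
// linspace(start=0, end=1, steps=5) the code above computes
// step = (1 - 0) / (5 - 1) = 0.25 and tabulates {0, 0.25, 0.5, 0.75, 1.0},
// i.e. the sequence covers both endpoints (up to rounding) whenever steps > 1.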
Tensor& logspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t steps, double base) {
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (result.numel() != steps) {
result.resize_({steps});
}
Tensor r = result.is_contiguous() ? result : result.contiguous();
if (steps == 0) {
// skip
} else if (steps == 1) {
r.fill_(::pow(base, start.to<double>()));
} else {
AT_DISPATCH_FLOATING_TYPES(r.scalar_type(), "logspace_cuda", [&]() {
scalar_t scalar_base = static_cast<scalar_t>(base);
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
LogspaceOp<scalar_t> logspace_method(scalar_start, step, scalar_base);
thrust::device_ptr<scalar_t> data_(r.data_ptr<scalar_t>());
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto policy = thrust::hip::par.on(stream);
thrust::tabulate(policy, data_, data_ + steps, logspace_method);
});
}
if (!result.is_contiguous()) {
result.copy_(r);
}
AT_CUDA_CHECK(hipGetLastError());
return result;
}
Tensor& range_cuda_out(Tensor& result, Scalar start, Scalar end, Scalar step) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "range_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
int64_t size = static_cast<int64_t>(((xend - xstart) / xstep) + 1);
if (result.numel() != size) {
result.resize_({size});
}
Tensor r = result.is_contiguous() ? result : result.contiguous();
LinspaceOp<scalar_t, accscalar_t> linspace_method(xstart, xstep);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto policy = thrust::hip::par.on(stream);
thrust::device_ptr<scalar_t> data_ptr(r.data_ptr<scalar_t>());
thrust::tabulate(policy, data_ptr, data_ptr + size, linspace_method);
if (!result.is_contiguous()) {
result.copy_(r);
}
});
AT_CUDA_CHECK(hipGetLastError());
return result;
}
Tensor& arange_cuda_out(Tensor& result, Scalar start, Scalar end, Scalar step) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "arange_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
// we use double precision for (start - end) / step
// to compute size_d for consistency across devices.
// The problem with using accscalar_t is that accscalar_t might be float32 on gpu for a float32 scalar_t,
// but double on cpu for the same,
// and the effective output size starts differing on CPU vs GPU because of precision issues, which
// we don't want.
// the corner-case we do want to take into account is int64_t, which has higher precision than double
double size_d;
if (std::is_same<scalar_t, int64_t>::value) {
size_d = ::ceil(static_cast<double>(end.to<accscalar_t>() - start.to<accscalar_t>())
/ step.to<accscalar_t>());
} else {
size_d = ::ceil(static_cast<double>(end.to<double>() - start.to<double>())
/ step.to<double>());
}
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
TORCH_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()),
"invalid size, possible overflow?");
int64_t size = static_cast<int64_t>(size_d);
int64_t numel = result.numel();
if (numel != size) {
if(numel > 0){
TORCH_WARN("The number of elements in the out tensor of shape ", result.sizes(),
" is ", numel, " which does not match the computed number of elements ", size,
". Note that this may occur as a result of rounding error. "
"The out tensor will be resized to a tensor of shape (", size, ",).");
}
result.resize_({size});
}
Tensor r = result.is_contiguous() ? result : result.contiguous();
LinspaceOp<scalar_t, accscalar_t> linspace_method(xstart, xstep);
hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
auto policy = thrust::hip::par.on(stream);
thrust::device_ptr<scalar_t> data_ptr(r.data_ptr<scalar_t>());
thrust::tabulate(policy, data_ptr, data_ptr + size, linspace_method);
if (!result.is_contiguous()) {
result.copy_(r);
}
});
AT_CUDA_CHECK(hipGetLastError());
return result;
}
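// Worked example of the two size formulas (added for clarity, not in the
// upstream source): with start=0, end=9, step=3,
// range : size = (9 - 0) / 3 + 1 = 4 -> {0, 3, 6, 9} (end point included)
// arange : size = ceil((9 - 0) / 3) = 3 -> {0, 3, 6} (end point excluded)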
}} // namespace at::native
| 602f263652dc658f5bf96b7c3aeebf27b6a43042.cu | #include <ATen/Dispatch.h>
#include <ATen/NativeFunctions.h>
#include <ATen/AccumulateType.h>
#include <ATen/cuda/Exceptions.h>
#include <ATen/cuda/CUDAContext.h>
#include <cmath>
#include <limits>
#include <thrust/device_ptr.h>
#include <thrust/sequence.h>
#include <thrust/execution_policy.h>
namespace at {
namespace native {
template<typename T, typename accT = T>
struct LinspaceOp {
__host__ __device__ LinspaceOp(accT start, accT step):
start_(start), step_(step) { }
__device__ __forceinline__ T operator()(ptrdiff_t index) {
accT increment = step_ * static_cast<accT>(index);
accT value = start_ + increment;
return static_cast<T>(value);
}
const accT start_, step_;
};
template<typename T, typename accT = T>
struct LogspaceOp {
__host__ __device__ LogspaceOp(accT start, accT step, accT base):
start_(start), step_(step), base_(base) { }
__device__ __forceinline__ T operator()(ptrdiff_t index) {
accT increment = step_ * static_cast<accT>(index);
accT value = std::pow(base_, start_ + increment);
return static_cast<T>(value);
}
const accT start_, step_, base_;
};
Tensor& linspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t steps) {
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (result.numel() != steps) {
result.resize_({steps});
}
Tensor r = result.is_contiguous() ? result : result.contiguous();
if (steps == 0) {
// skip
} else if (steps == 1) {
r.fill_(start);
} else {
AT_DISPATCH_FLOATING_TYPES(r.scalar_type(), "linspace_cuda", [&]() {
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
LinspaceOp<scalar_t> linspace_method(scalar_start, step);
thrust::device_ptr<scalar_t> data_(r.data_ptr<scalar_t>());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto policy = thrust::cuda::par.on(stream);
thrust::tabulate(policy, data_, data_ + steps, linspace_method);
});
}
if (!result.is_contiguous()) {
result.copy_(r);
}
AT_CUDA_CHECK(cudaGetLastError());
return result;
}
Tensor& logspace_cuda_out(Tensor& result, Scalar start, Scalar end, int64_t steps, double base) {
TORCH_CHECK(steps >= 0, "number of steps must be non-negative");
if (result.numel() != steps) {
result.resize_({steps});
}
Tensor r = result.is_contiguous() ? result : result.contiguous();
if (steps == 0) {
// skip
} else if (steps == 1) {
r.fill_(std::pow(base, start.to<double>()));
} else {
AT_DISPATCH_FLOATING_TYPES(r.scalar_type(), "logspace_cuda", [&]() {
scalar_t scalar_base = static_cast<scalar_t>(base);
scalar_t scalar_start = start.to<scalar_t>();
scalar_t scalar_end = end.to<scalar_t>();
scalar_t step = (scalar_end - scalar_start) / static_cast<scalar_t>(steps - 1);
LogspaceOp<scalar_t> logspace_method(scalar_start, step, scalar_base);
thrust::device_ptr<scalar_t> data_(r.data_ptr<scalar_t>());
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto policy = thrust::cuda::par.on(stream);
thrust::tabulate(policy, data_, data_ + steps, logspace_method);
});
}
if (!result.is_contiguous()) {
result.copy_(r);
}
AT_CUDA_CHECK(cudaGetLastError());
return result;
}
Tensor& range_cuda_out(Tensor& result, Scalar start, Scalar end, Scalar step) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "range_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
int64_t size = static_cast<int64_t>(((xend - xstart) / xstep) + 1);
if (result.numel() != size) {
result.resize_({size});
}
Tensor r = result.is_contiguous() ? result : result.contiguous();
LinspaceOp<scalar_t, accscalar_t> linspace_method(xstart, xstep);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto policy = thrust::cuda::par.on(stream);
thrust::device_ptr<scalar_t> data_ptr(r.data_ptr<scalar_t>());
thrust::tabulate(policy, data_ptr, data_ptr + size, linspace_method);
if (!result.is_contiguous()) {
result.copy_(r);
}
});
AT_CUDA_CHECK(cudaGetLastError());
return result;
}
Tensor& arange_cuda_out(Tensor& result, Scalar start, Scalar end, Scalar step) {
AT_DISPATCH_ALL_TYPES_AND(at::ScalarType::Half, result.scalar_type(), "arange_cuda", [&]() {
using accscalar_t = at::acc_type<scalar_t, true>;
auto xstart = start.to<accscalar_t>();
auto xend = end.to<accscalar_t>();
auto xstep = step.to<accscalar_t>();
// we use double precision for (start - end) / step
// to compute size_d for consistency across devices.
// The problem with using accscalar_t is that accscalar_t might be float32 on gpu for a float32 scalar_t,
// but double on cpu for the same,
// and the effective output size starts differing on CPU vs GPU because of precision issues, which
// we don't want.
// the corner-case we do want to take into account is int64_t, which has higher precision than double
double size_d;
if (std::is_same<scalar_t, int64_t>::value) {
size_d = std::ceil(static_cast<double>(end.to<accscalar_t>() - start.to<accscalar_t>())
/ step.to<accscalar_t>());
} else {
size_d = std::ceil(static_cast<double>(end.to<double>() - start.to<double>())
/ step.to<double>());
}
TORCH_CHECK(xstep > 0 || xstep < 0, "step must be nonzero");
TORCH_CHECK(std::isfinite(static_cast<double>(xstart)) &&
std::isfinite(static_cast<double>(xend)),
"unsupported range: ", xstart, " -> ", xend);
TORCH_CHECK(((xstep > 0) && (xend >= xstart)) || ((xstep < 0) && (xend <= xstart)),
"upper bound and larger bound inconsistent with step sign");
TORCH_CHECK(size_d >= 0 && size_d <= static_cast<double>(std::numeric_limits<int64_t>::max()),
"invalid size, possible overflow?");
int64_t size = static_cast<int64_t>(size_d);
int64_t numel = result.numel();
if (numel != size) {
if(numel > 0){
TORCH_WARN("The number of elements in the out tensor of shape ", result.sizes(),
" is ", numel, " which does not match the computed number of elements ", size,
". Note that this may occur as a result of rounding error. "
"The out tensor will be resized to a tensor of shape (", size, ",).");
}
result.resize_({size});
}
Tensor r = result.is_contiguous() ? result : result.contiguous();
LinspaceOp<scalar_t, accscalar_t> linspace_method(xstart, xstep);
cudaStream_t stream = at::cuda::getCurrentCUDAStream();
auto policy = thrust::cuda::par.on(stream);
thrust::device_ptr<scalar_t> data_ptr(r.data_ptr<scalar_t>());
thrust::tabulate(policy, data_ptr, data_ptr + size, linspace_method);
if (!result.is_contiguous()) {
result.copy_(r);
}
});
AT_CUDA_CHECK(cudaGetLastError());
return result;
}
}} // namespace at::native
|
3f945bbc4ac08778aa485c9b068527ae12444189.hip | // !!! This is a file automatically generated by hipify!!!
/** Modified version of knn-CUDA from https://github.com/vincentfpgarcia/kNN-CUDA
* The modifications are
* removed texture memory usage
* removed split query KNN computation
* added feature extraction with bilinear interpolation
*
* Last modified by Christopher B. Choy <[email protected]> 12/23/2016
*/
// Includes
#include "hip/hip_runtime.h"
#include <cstdio>
#include <sys/time.h>
#include <time.h>
// Constants used by the program
#define BLOCK_DIM 16
//-----------------------------------------------------------------------------------------------//
// KERNELS //
//-----------------------------------------------------------------------------------------------//
__global__ void extract_with_interpolation(int nthreads, float *data,
float *n_xy_coords,
float *extracted_data,
int n_max_coord, int channels,
int height, int width) {
int x0, x1, y0, y1, nc;
float wx0, wx1, wy0, wy1;
int n, nd;
float x, y;
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
index += blockDim.x * gridDim.x) {
n = (index / n_max_coord);
nd = n * n_max_coord * channels;
x = n_xy_coords[index * 2];
y = n_xy_coords[index * 2 + 1];
x0 = static_cast<int>(floor(x));
x1 = x0 + 1;
y0 = static_cast<int>(floor(y));
y1 = y0 + 1;
x0 = x0 <= 0 ? 0 : (x0 >= (width - 1) ? (width - 1) : x0);
y0 = y0 <= 0 ? 0 : (y0 >= (height - 1) ? (height - 1) : y0);
x1 = x1 <= 0 ? 0 : (x1 >= (width - 1) ? (width - 1) : x1);
y1 = y1 <= 0 ? 0 : (y1 >= (height - 1) ? (height - 1) : y1);
wx0 = static_cast<float>(x1) - x;
wx1 = x - x0;
wy0 = static_cast<float>(y1) - y;
wy1 = y - y0;
if (x0 == x1) {
wx0 = 1;
wx1 = 0;
}
if (y0 == y1) {
wy0 = 1;
wy1 = 0;
}
for (int c = 0; c < channels; c++) {
nc = (n * channels + c) * height;
// extracted_data[index * channels + c] = wy0 * wx0 * data[(nc + y0) *
// width + x0]
// extracted_data[nd + index % n_max_coord + n_max_coord * c] = index;
extracted_data[nd + index % n_max_coord + n_max_coord * c] =
wy0 * wx0 * data[(nc + y0) * width + x0] +
wy1 * wx0 * data[(nc + y1) * width + x0] +
wy0 * wx1 * data[(nc + y0) * width + x1] +
wy1 * wx1 * data[(nc + y1) * width + x1];
}
}
}
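/* Editor's note (not part of the original source): the kernel above uses a
* grid-stride loop, so any 1-D launch covers all n_batch * n_max_coord
* coordinates. A typical configuration, assuming the device buffers named in
* extract_cuda() further below, would be:
*
* int nthreads = n_batch * n_max_coord;
* hipLaunchKernelGGL(( extract_with_interpolation), dim3((nthreads + 255) / 256), dim3(256), 0, 0,
* nthreads, activation_device, coord_device, extracted_activation_device,
* n_max_coord, n_channel, height, width);
*
* The block size of 256 is an arbitrary illustrative choice.
*/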
/**
* Computes the distance between two matrix A (reference points) and
* B (query points) containing respectively wA and wB points.
*
* @param A pointer on the matrix A
* @param wA width of the matrix A = number of points in A
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
*/
__global__ void cuComputeDistanceGlobal(float *A, int wA, float *B, int wB,
int dim, float *AB) {
// Declaration of the shared memory arrays As and Bs used to store the
// sub-matrix of A and B
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Other variables
float tmp;
float ssd = 0;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * wA;
step_B = BLOCK_DIM * wB;
end_A = begin_A + (dim - 1) * wA;
// Conditions
int cond0 = (begin_A + tx < wA); // used to write in shared memory
int cond1 = (begin_B + tx < wB); // used to write in shared memory, for
// computations, and to write in output matrix
int cond2 =
(begin_A + ty < wA); // used for computations and to write in output matrix
// Loop over all the sub-matrices of A and B required to compute the block
// sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads
// one element of each matrix
if (a / wA + ty < dim) {
shared_A[ty][tx] = (cond0) ? A[a + wA * ty + tx] : 0;
shared_B[ty][tx] = (cond1) ? B[b + wB * ty + tx] : 0;
} else {
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
// Compute the difference between the two matrices; each thread computes one
// element of the block sub-matrix
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k) {
tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp * tmp;
}
}
// Synchronize to make sure that the preceding computation is done before
// loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if (cond2 && cond1) {
AB[(begin_A + ty) * wB + begin_B + tx] = ssd;
}
}
/**
* Gathers the k smallest distances for each column of the distance matrix
* and stores them at the top of that column.
*
* @param dist distance matrix
* @param ind index matrix
* @param width width of the distance matrix and of the index matrix
* @param height height of the distance matrix and of the index matrix
* @param k number of neighbors to consider
*/
__global__ void cuInsertionSort(float *dist, int *ind, int width, int height,
int k) {
// printf("test2\n");
// Variables
int l, i, j;
float *p_dist;
int *p_ind;
float curr_dist, max_dist;
int curr_row, max_row;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex < width) {
// Pointer shift, initialization, and max value
p_dist = dist + xIndex;
p_ind = ind + xIndex;
max_dist = p_dist[0];
p_ind[0] = 0;
// Part 1 : sort the k first elements
for (l = 1; l < k; l++) {
curr_row = l * width;
curr_dist = p_dist[curr_row];
if (curr_dist < max_dist) {
i = l - 1;
for (int a = 0; a < l - 1; a++) {
if (p_dist[a * width] > curr_dist) {
i = a;
break;
}
}
for (j = l; j > i; j--) {
p_dist[j * width] = p_dist[(j - 1) * width];
p_ind[j * width] = p_ind[(j - 1) * width];
}
p_dist[i * width] = curr_dist;
p_ind[i * width] = l;
} else {
p_ind[l * width] = l;
}
max_dist = p_dist[curr_row];
}
    // Part 2 : insert the remaining elements among the k first lines
max_row = (k - 1) * width;
for (l = k; l < height; l++) {
curr_dist = p_dist[l * width];
if (curr_dist < max_dist) {
i = k - 1;
for (int a = 0; a < k - 1; a++) {
if (p_dist[a * width] > curr_dist) {
i = a;
break;
}
}
for (j = k - 1; j > i; j--) {
p_dist[j * width] = p_dist[(j - 1) * width];
p_ind[j * width] = p_ind[(j - 1) * width];
}
p_dist[i * width] = curr_dist;
p_ind[i * width] = l;
max_dist = p_dist[max_row];
}
}
}
}
/**
 * Computes the square root of the k first lines (k * width first elements)
 * of the distance matrix.
*
* @param dist distance matrix
* @param width width of the distance matrix
* @param k number of neighbors to consider
*/
__global__ void cuParallelSqrt(float *dist, int width, int k) {
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
// printf("test3\n");
if (xIndex < width && yIndex < k)
dist[yIndex * width + xIndex] = sqrt(dist[yIndex * width + xIndex]);
}
//-----------------------------------------------------------------------------------------------//
// K-th NEAREST NEIGHBORS //
//-----------------------------------------------------------------------------------------------//
/**
* Prints the error message return during the memory allocation.
*
* @param error error value return by the memory allocation function
* @param memorySize size of memory tried to be allocated
*/
void printErrorMessage(hipError_t error, int memorySize) {
printf("==================================================\n");
printf("MEMORY ALLOCATION ERROR : %s\n", hipGetErrorString(error));
printf("Whished allocated memory : %d\n", memorySize);
printf("==================================================\n");
}
/**
* Feature extraction algorithm
* - Initialize CUDA
* - Allocate device memory
* - Copy data (activation, coordinates) from host to device memory
* - Extract features from the coordinates using bilinear interpolation
* - Copy extracted features from device to host memory
*
* @param activation reference feature map
* @param n_batch number of feature maps
* @param n_channel size of the feature dimension
* @param height height of the feature map
* @param width width of the feature map
* @param coords coordinates of the points for extraction
* @param extracted_activation pointer for the final extracted features
*
*/
void extract_cuda(float *activation, int n_batch, int n_channel, int height,
int width, float *coords, int n_max_coord, int dim_coord,
float *extracted_activation) {
// activation n_batch x n_channel x height x width
// coords n_batch x n_max_coord x dim_coord
// uninitialized empty pointer which will be filled with extracted_activation
// n_batch x n_channel x n_max_coord. KNN requires dim x n_feature format
unsigned int size_of_float = sizeof(float);
// Variables
float *activation_device;
float *coord_device;
float *extracted_activation_device;
// CUDA Initialisation
hipInit(0);
  // Allocate global memory for the activation maps, the extracted features,
  // and the coordinates
hipMalloc((void **)&activation_device,
n_batch * n_channel * height * width * size_of_float);
hipMalloc((void **)&extracted_activation_device,
n_batch * n_channel * n_max_coord * size_of_float);
hipMalloc((void **)&coord_device,
n_batch * n_max_coord * dim_coord * size_of_float);
  // Grids and threads
dim3 g_size_r((n_batch * n_max_coord * dim_coord) / 256, 1, 1);
if ((n_batch * n_max_coord * dim_coord) % 256 != 0)
g_size_r.x += 1;
hipMemset(extracted_activation_device, 0,
n_batch * n_channel * n_max_coord * size_of_float);
// Copy coordinates to the device
hipMemcpy(coord_device, &coords[0],
n_batch * n_max_coord * dim_coord * size_of_float,
hipMemcpyHostToDevice);
  // Copy the activation maps to the device
hipMemcpy(activation_device, &activation[0],
n_batch * n_channel * height * width * size_of_float,
hipMemcpyHostToDevice);
  // Grids and threads
dim3 g_size((n_batch * n_max_coord) / 256, 1, 1);
dim3 t_size(256, 1, 1);
if ((n_batch * n_max_coord) % 256 != 0)
g_size.x += 1;
hipLaunchKernelGGL(( extract_with_interpolation), dim3(g_size), dim3(t_size), 0, 0,
n_batch * n_max_coord, activation_device, coord_device,
extracted_activation_device, n_max_coord, n_channel, height, width);
// Memory copy of output from device to host
hipMemcpy(extracted_activation, &extracted_activation_device[0],
n_batch * n_channel * n_max_coord * size_of_float,
hipMemcpyDeviceToHost);
// Free memory
hipFree(coord_device);
hipFree(activation_device);
hipFree(extracted_activation_device);
}
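// Example call (a sketch with assumed names, not used elsewhere in this file):
// for one C x H x W activation map and N (x, y) points,
//   float *out = (float *)malloc(C * N * sizeof(float));
//   extract_cuda(feat, /*n_batch=*/1, C, H, W, pts, /*n_max_coord=*/N,
//                /*dim_coord=*/2, out);
// fills out with the interpolated features, C x N per batch item.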
/**
* K nearest neighbor algorithm
* - Initialize CUDA
* - Allocate device memory
* - Copy point sets (reference and query points) from host to device memory
* - Compute the distances + indexes to the k nearest neighbors for each query
* point
* - Copy distances from device to host memory
*
* @param ref_host reference points ; pointer to linear matrix
* @param ref_width number of reference points ; width of the matrix
* @param query_host query points ; pointer to linear matrix
* @param query_width number of query points ; width of the matrix
* @param height dimension of points ; height of the matrices
 * @param k        number of neighbors to consider
* @param dist_host distances to k nearest neighbors ; pointer to linear
* matrix
 * @param ind_host     indexes of the k nearest neighbors ; pointer to linear
* matrix
*
*/
void knn_cuda(float *ref_host, int ref_width, float *query_host,
int query_width, int height, int k, float *dist_host,
int *ind_host) {
// Variables
// CUDA Initialisation
hipInit(0);
  // Grids and threads
dim3 g_16x16(query_width / 16, ref_width / 16, 1);
dim3 t_16x16(16, 16, 1);
if (query_width % 16 != 0)
g_16x16.x += 1;
if (ref_width % 16 != 0)
g_16x16.y += 1;
//
dim3 g_256x1(query_width / 256, 1, 1);
dim3 t_256x1(256, 1, 1);
if (query_width % 256 != 0)
g_256x1.x += 1;
dim3 g_k_16x16(query_width / 16, k / 16, 1);
dim3 t_k_16x16(16, 16, 1);
if (query_width % 16 != 0)
g_k_16x16.x += 1;
if (k % 16 != 0)
g_k_16x16.y += 1;
// Kernel 1: Compute all the distances
hipLaunchKernelGGL(( cuComputeDistanceGlobal), dim3(g_16x16), dim3(t_16x16), 0, 0, ref_host, ref_width, query_host,
query_width, height, dist_host);
// Kernel 2: Sort each column
hipLaunchKernelGGL(( cuInsertionSort), dim3(g_256x1), dim3(t_256x1), 0, 0, dist_host, ind_host, query_width,
ref_width, k);
// Kernel 3: Compute square root of k first elements
hipLaunchKernelGGL(( cuParallelSqrt), dim3(g_k_16x16), dim3(t_k_16x16), 0, 0, dist_host, query_width, k);
hipDeviceSynchronize();
}
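// Usage note (inferred from the kernels above rather than stated by the
// original API docs): ref_host and query_host are expected in feature-major
// layout (height x ref_width and height x query_width), dist_host must hold
// query_width * ref_width floats, and all four buffers must be accessible from
// the device -- e.g. allocated with hipMallocManaged as done in main() below.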
float compute_distance(const float *ref, int ref_nb, const float *query,
int query_nb, int dim, int ref_index, int query_index) {
float sum = 0.f;
for (int d = 0; d < dim; ++d) {
const float diff =
ref[d * ref_nb + ref_index] - query[d * query_nb + query_index];
sum += diff * diff;
}
return sqrtf(sum);
}
void modified_insertion_sort(float *dist, int *index, int length, int k) {
// Initialise the first index
index[0] = 0;
// Go through all points
for (int i = 1; i < length; ++i) {
// Store current distance and associated index
float curr_dist = dist[i];
int curr_index = i;
    // Skip the current value if its index is >= k and if it's higher than the
    // k-th smallest value already sorted
if (i >= k && curr_dist >= dist[k - 1]) {
continue;
}
    // Shift values (and indexes) higher than the current distance to the right
int j = min(i, k - 1);
while (j > 0 && dist[j - 1] > curr_dist) {
dist[j] = dist[j - 1];
index[j] = index[j - 1];
--j;
}
// Write the current distance and index at their position
dist[j] = curr_dist;
index[j] = curr_index;
}
}
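// Worked example for the function above: with k = 2 and dist = {5, 3, 4, 1},
// the loop leaves dist = {1, 3, ...} and index = {3, 1, ...}, i.e. the k
// smallest distances in ascending order together with their original
// positions; entries past the first k are not meaningful afterwards.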
bool knn_c(const float *ref, int ref_nb, const float *query, int query_nb,
int dim, int k, float *knn_dist, int *knn_index) {
// Allocate local array to store all the distances / indexes for a given query
// point
float *dist = (float *)malloc(ref_nb * sizeof(float));
int *index = (int *)malloc(ref_nb * sizeof(int));
// Allocation checks
if (!dist || !index) {
printf("Memory allocation error\n");
free(dist);
free(index);
return false;
}
  // Process one query point at a time
for (int i = 0; i < query_nb; ++i) {
// Compute all distances / indexes
for (int j = 0; j < ref_nb; ++j) {
dist[j] = compute_distance(ref, ref_nb, query, query_nb, dim, j, i);
index[j] = j;
}
// Sort distances / indexes
modified_insertion_sort(dist, index, ref_nb, k);
// Copy k smallest distances and their associated index
for (int j = 0; j < k; ++j) {
knn_dist[j * query_nb + i] = dist[j];
knn_index[j * query_nb + i] = index[j];
}
}
// Memory clean-up
free(dist);
free(index);
return true;
}
/**
* Example of use of kNN search CUDA.
*/
int main(void) {
// Variables and parameters
float *ref; // Pointer to reference point array
float *query; // Pointer to query point array
float *dist, *dist_c; // Pointer to distance array
int *ind, *ind_c; // Pointer to index array
int ref_nb = 4096; // Reference point number, max=65535
int query_nb = 4096; // Query point number, max=65535
int dim = 32; // Dimension of points
int k = 20; // Nearest neighbors to consider
int iterations = 100;
int c_iterations = 10;
int i;
const float precision = 0.001f; // distance error max
int nb_correct_precisions = 0;
int nb_correct_indexes = 0;
float *knn_dist = (float *)malloc(query_nb * k * sizeof(float));
int *knn_index = (int *)malloc(query_nb * k * sizeof(int));
// Memory allocation
// ref = (float *)malloc(ref_nb * dim * sizeof(float));
// query = (float *)malloc(query_nb * dim * sizeof(float));
hipMallocManaged(&ref, ref_nb * dim * sizeof(float));
hipMallocManaged(&query, query_nb * dim * sizeof(float));
hipMallocManaged(&dist, query_nb * ref_nb * sizeof(float));
hipMallocManaged(&ind, query_nb * k * sizeof(int));
dist_c = (float *)malloc(query_nb * k * sizeof(float));
  ind_c = (int *)malloc(query_nb * k * sizeof(int));
// Init
srand(time(NULL));
for (i = 0; i < ref_nb * dim; i++)
ref[i] = (float)rand() / (float)RAND_MAX;
for (i = 0; i < query_nb * dim; i++)
query[i] = (float)rand() / (float)RAND_MAX;
printf("Ground truth computation in progress...\n\n");
if (!knn_c(ref, ref_nb, query, query_nb, dim, k, knn_dist, knn_index)) {
free(knn_dist);
free(knn_index);
return EXIT_FAILURE;
}
// Variables for duration evaluation
hipEvent_t start, stop;
hipEventCreate(&start);
hipEventCreate(&stop);
float elapsed_time;
  // Display information
printf("Number of reference points : %6d\n", ref_nb);
printf("Number of query points : %6d\n", query_nb);
printf("Dimension of points : %4d\n", dim);
printf("Number of neighbors to consider : %4d\n", k);
printf("Processing kNN search :\n");
printf("On CPU: \n");
struct timeval tic;
gettimeofday(&tic, NULL);
for (i = 0; i < c_iterations; i++) {
knn_c(ref, ref_nb, query, query_nb, dim, k, dist_c, ind_c);
}
for (int i = 0; i < query_nb * k; ++i) {
if (fabs(dist_c[i] - knn_dist[i]) <= precision) {
nb_correct_precisions++;
}
if (ind_c[i] == knn_index[i]) {
nb_correct_indexes++;
}
}
struct timeval toc;
gettimeofday(&toc, NULL);
elapsed_time = toc.tv_sec - tic.tv_sec;
elapsed_time += (toc.tv_usec - tic.tv_usec) / 1000000.;
float precision_accuracy = nb_correct_precisions / ((float)query_nb * k);
float index_accuracy = nb_correct_indexes / ((float)query_nb * k);
printf("%f, %f\n", precision_accuracy, index_accuracy);
printf(" done in %f s for %d iterations (%f s by iteration)\n", elapsed_time,
c_iterations, elapsed_time / (c_iterations));
printf("on GPU: \n");
// Call kNN search CUDA
hipEventRecord(start, 0);
for (i = 0; i < iterations; i++) {
knn_cuda(ref, ref_nb, query, query_nb, dim, k, dist, ind);
}
nb_correct_precisions = 0;
nb_correct_indexes = 0;
for (int i = 0; i < query_nb * k; ++i) {
if (fabs(dist[i] - knn_dist[i]) <= precision) {
nb_correct_precisions++;
}
if (ind[i] == knn_index[i]) {
nb_correct_indexes++;
}
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&elapsed_time, start, stop);
precision_accuracy = nb_correct_precisions / ((float)query_nb * k);
index_accuracy = nb_correct_indexes / ((float)query_nb * k);
printf("%f, %f\n", precision_accuracy, index_accuracy);
printf(" done in %f s for %d iterations (%f s by iteration)\n",
elapsed_time / 1000, iterations, elapsed_time / (iterations * 1000));
// Destroy cuda event object and free memory
hipEventDestroy(start);
hipEventDestroy(stop);
hipFree(ind);
hipFree(dist);
hipFree(query);
hipFree(ref);
free(dist_c);
free(ind_c);
} | 3f945bbc4ac08778aa485c9b068527ae12444189.cu | /** Modified version of knn-CUDA from https://github.com/vincentfpgarcia/kNN-CUDA
* The modifications are
* removed texture memory usage
* removed split query KNN computation
* added feature extraction with bilinear interpolation
*
* Last modified by Christopher B. Choy <[email protected]> 12/23/2016
*/
// Includes
#include "cuda.h"
#include <cstdio>
#include <sys/time.h>
#include <time.h>
// Constants used by the program
#define BLOCK_DIM 16
//-----------------------------------------------------------------------------------------------//
// KERNELS //
//-----------------------------------------------------------------------------------------------//
__global__ void extract_with_interpolation(int nthreads, float *data,
float *n_xy_coords,
float *extracted_data,
int n_max_coord, int channels,
int height, int width) {
int x0, x1, y0, y1, nc;
float wx0, wx1, wy0, wy1;
int n, nd;
float x, y;
for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < (nthreads);
index += blockDim.x * gridDim.x) {
n = (index / n_max_coord);
nd = n * n_max_coord * channels;
x = n_xy_coords[index * 2];
y = n_xy_coords[index * 2 + 1];
x0 = static_cast<int>(floor(x));
x1 = x0 + 1;
y0 = static_cast<int>(floor(y));
y1 = y0 + 1;
x0 = x0 <= 0 ? 0 : (x0 >= (width - 1) ? (width - 1) : x0);
y0 = y0 <= 0 ? 0 : (y0 >= (height - 1) ? (height - 1) : y0);
x1 = x1 <= 0 ? 0 : (x1 >= (width - 1) ? (width - 1) : x1);
y1 = y1 <= 0 ? 0 : (y1 >= (height - 1) ? (height - 1) : y1);
wx0 = static_cast<float>(x1) - x;
wx1 = x - x0;
wy0 = static_cast<float>(y1) - y;
wy1 = y - y0;
if (x0 == x1) {
wx0 = 1;
wx1 = 0;
}
if (y0 == y1) {
wy0 = 1;
wy1 = 0;
}
for (int c = 0; c < channels; c++) {
nc = (n * channels + c) * height;
// extracted_data[index * channels + c] = wy0 * wx0 * data[(nc + y0) *
// width + x0]
// extracted_data[nd + index % n_max_coord + n_max_coord * c] = index;
extracted_data[nd + index % n_max_coord + n_max_coord * c] =
wy0 * wx0 * data[(nc + y0) * width + x0] +
wy1 * wx0 * data[(nc + y1) * width + x0] +
wy0 * wx1 * data[(nc + y0) * width + x1] +
wy1 * wx1 * data[(nc + y1) * width + x1];
}
}
}
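// Note on the kernel above: each grid-stride iteration handles one
// (batch item, point) pair and bilinearly interpolates every channel at that
// point; coordinates are clamped to the feature-map border, and the output is
// laid out as n_batch x channels x n_max_coord.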
/**
 * Computes the distances between two matrices A (reference points) and
* B (query points) containing respectively wA and wB points.
*
* @param A pointer on the matrix A
* @param wA width of the matrix A = number of points in A
* @param B pointer on the matrix B
* @param wB width of the matrix B = number of points in B
* @param dim dimension of points = height of matrices A and B
* @param AB pointer on the matrix containing the wA*wB distances computed
*/
__global__ void cuComputeDistanceGlobal(float *A, int wA, float *B, int wB,
int dim, float *AB) {
// Declaration of the shared memory arrays As and Bs used to store the
// sub-matrix of A and B
__shared__ float shared_A[BLOCK_DIM][BLOCK_DIM];
__shared__ float shared_B[BLOCK_DIM][BLOCK_DIM];
// Sub-matrix of A (begin, step, end) and Sub-matrix of B (begin, step)
__shared__ int begin_A;
__shared__ int begin_B;
__shared__ int step_A;
__shared__ int step_B;
__shared__ int end_A;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Other variables
float tmp;
float ssd = 0;
// Loop parameters
begin_A = BLOCK_DIM * blockIdx.y;
begin_B = BLOCK_DIM * blockIdx.x;
step_A = BLOCK_DIM * wA;
step_B = BLOCK_DIM * wB;
end_A = begin_A + (dim - 1) * wA;
// Conditions
int cond0 = (begin_A + tx < wA); // used to write in shared memory
  int cond1 = (begin_B + tx < wB); // used to write in shared memory, in
                                   // computations, and in the output matrix
  int cond2 =
      (begin_A + ty < wA); // used in computations and in the output matrix
// Loop over all the sub-matrices of A and B required to compute the block
// sub-matrix
for (int a = begin_A, b = begin_B; a <= end_A; a += step_A, b += step_B) {
// Load the matrices from device memory to shared memory; each thread loads
// one element of each matrix
if (a / wA + ty < dim) {
shared_A[ty][tx] = (cond0) ? A[a + wA * ty + tx] : 0;
shared_B[ty][tx] = (cond1) ? B[b + wB * ty + tx] : 0;
} else {
shared_A[ty][tx] = 0;
shared_B[ty][tx] = 0;
}
// Synchronize to make sure the matrices are loaded
__syncthreads();
    // Compute the difference between the two matrices; each thread computes one
// element of the block sub-matrix
if (cond2 && cond1) {
for (int k = 0; k < BLOCK_DIM; ++k) {
tmp = shared_A[k][ty] - shared_B[k][tx];
ssd += tmp * tmp;
}
}
// Synchronize to make sure that the preceding computation is done before
// loading two new sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory; each thread writes one element
if (cond2 && cond1) {
AB[(begin_A + ty) * wB + begin_B + tx] = ssd;
}
}
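// Note on the kernel above: each block fills one BLOCK_DIM x BLOCK_DIM tile of
// AB by marching over the point dimension in BLOCK_DIM-sized chunks staged in
// shared memory. AB receives *squared* L2 distances (wA rows x wB columns);
// square roots are taken only later, in cuParallelSqrt, and only for the k
// first rows left in place by the sort.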
/**
* Gathers k-th smallest distances for each column of the distance matrix in
* the top.
*
* @param dist distance matrix
* @param ind index matrix
* @param width width of the distance matrix and of the index matrix
* @param height height of the distance matrix and of the index matrix
* @param k number of neighbors to consider
*/
__global__ void cuInsertionSort(float *dist, int *ind, int width, int height,
int k) {
// printf("test2\n");
// Variables
int l, i, j;
float *p_dist;
int *p_ind;
float curr_dist, max_dist;
int curr_row, max_row;
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
if (xIndex < width) {
// Pointer shift, initialization, and max value
p_dist = dist + xIndex;
p_ind = ind + xIndex;
max_dist = p_dist[0];
p_ind[0] = 0;
    // Part 1 : sort the k first elements
for (l = 1; l < k; l++) {
curr_row = l * width;
curr_dist = p_dist[curr_row];
if (curr_dist < max_dist) {
i = l - 1;
for (int a = 0; a < l - 1; a++) {
if (p_dist[a * width] > curr_dist) {
i = a;
break;
}
}
for (j = l; j > i; j--) {
p_dist[j * width] = p_dist[(j - 1) * width];
p_ind[j * width] = p_ind[(j - 1) * width];
}
p_dist[i * width] = curr_dist;
p_ind[i * width] = l;
} else {
p_ind[l * width] = l;
}
max_dist = p_dist[curr_row];
}
    // Part 2 : insert the remaining elements among the k first lines
max_row = (k - 1) * width;
for (l = k; l < height; l++) {
curr_dist = p_dist[l * width];
if (curr_dist < max_dist) {
i = k - 1;
for (int a = 0; a < k - 1; a++) {
if (p_dist[a * width] > curr_dist) {
i = a;
break;
}
}
for (j = k - 1; j > i; j--) {
p_dist[j * width] = p_dist[(j - 1) * width];
p_ind[j * width] = p_ind[(j - 1) * width];
}
p_dist[i * width] = curr_dist;
p_ind[i * width] = l;
max_dist = p_dist[max_row];
}
}
}
}
/**
 * Computes the square root of the k first lines (k * width first elements)
 * of the distance matrix.
*
* @param dist distance matrix
* @param width width of the distance matrix
* @param k number of neighbors to consider
*/
__global__ void cuParallelSqrt(float *dist, int width, int k) {
unsigned int xIndex = blockIdx.x * blockDim.x + threadIdx.x;
unsigned int yIndex = blockIdx.y * blockDim.y + threadIdx.y;
// printf("test3\n");
if (xIndex < width && yIndex < k)
dist[yIndex * width + xIndex] = sqrt(dist[yIndex * width + xIndex]);
}
//-----------------------------------------------------------------------------------------------//
// K-th NEAREST NEIGHBORS //
//-----------------------------------------------------------------------------------------------//
/**
* Prints the error message return during the memory allocation.
*
* @param error error value return by the memory allocation function
* @param memorySize size of memory tried to be allocated
*/
void printErrorMessage(cudaError_t error, int memorySize) {
printf("==================================================\n");
printf("MEMORY ALLOCATION ERROR : %s\n", cudaGetErrorString(error));
printf("Whished allocated memory : %d\n", memorySize);
printf("==================================================\n");
}
/**
* Feature extraction algorithm
* - Initialize CUDA
* - Allocate device memory
* - Copy data (activation, coordinates) from host to device memory
* - Extract features from the coordinates using bilinear interpolation
* - Copy extracted features from device to host memory
*
* @param activation reference feature map
* @param n_batch number of feature maps
* @param n_channel size of the feature dimension
* @param height height of the feature map
* @param width width of the feature map
* @param coords coordinates of the points for extraction
* @param extracted_activation pointer for the final extracted features
*
*/
void extract_cuda(float *activation, int n_batch, int n_channel, int height,
int width, float *coords, int n_max_coord, int dim_coord,
float *extracted_activation) {
// activation n_batch x n_channel x height x width
// coords n_batch x n_max_coord x dim_coord
// uninitialized empty pointer which will be filled with extracted_activation
// n_batch x n_channel x n_max_coord. KNN requires dim x n_feature format
unsigned int size_of_float = sizeof(float);
// Variables
float *activation_device;
float *coord_device;
float *extracted_activation_device;
// CUDA Initialisation
cuInit(0);
  // Allocate global memory for the activation maps, the extracted features,
  // and the coordinates
cudaMalloc((void **)&activation_device,
n_batch * n_channel * height * width * size_of_float);
cudaMalloc((void **)&extracted_activation_device,
n_batch * n_channel * n_max_coord * size_of_float);
cudaMalloc((void **)&coord_device,
n_batch * n_max_coord * dim_coord * size_of_float);
  // Grids and threads
dim3 g_size_r((n_batch * n_max_coord * dim_coord) / 256, 1, 1);
if ((n_batch * n_max_coord * dim_coord) % 256 != 0)
g_size_r.x += 1;
cudaMemset(extracted_activation_device, 0,
n_batch * n_channel * n_max_coord * size_of_float);
// Copy coordinates to the device
cudaMemcpy(coord_device, &coords[0],
n_batch * n_max_coord * dim_coord * size_of_float,
cudaMemcpyHostToDevice);
  // Copy the activation maps to the device
cudaMemcpy(activation_device, &activation[0],
n_batch * n_channel * height * width * size_of_float,
cudaMemcpyHostToDevice);
  // Grids and threads
dim3 g_size((n_batch * n_max_coord) / 256, 1, 1);
dim3 t_size(256, 1, 1);
if ((n_batch * n_max_coord) % 256 != 0)
g_size.x += 1;
extract_with_interpolation<<<g_size, t_size>>>(
n_batch * n_max_coord, activation_device, coord_device,
extracted_activation_device, n_max_coord, n_channel, height, width);
// Memory copy of output from device to host
cudaMemcpy(extracted_activation, &extracted_activation_device[0],
n_batch * n_channel * n_max_coord * size_of_float,
cudaMemcpyDeviceToHost);
// Free memory
cudaFree(coord_device);
cudaFree(activation_device);
cudaFree(extracted_activation_device);
}
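// Example call (a sketch with assumed names, not used elsewhere in this file):
// for one C x H x W activation map and N (x, y) points,
//   float *out = (float *)malloc(C * N * sizeof(float));
//   extract_cuda(feat, /*n_batch=*/1, C, H, W, pts, /*n_max_coord=*/N,
//                /*dim_coord=*/2, out);
// fills out with the interpolated features, C x N per batch item.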
/**
* K nearest neighbor algorithm
* - Initialize CUDA
* - Allocate device memory
* - Copy point sets (reference and query points) from host to device memory
* - Compute the distances + indexes to the k nearest neighbors for each query
* point
* - Copy distances from device to host memory
*
* @param ref_host reference points ; pointer to linear matrix
* @param ref_width number of reference points ; width of the matrix
* @param query_host query points ; pointer to linear matrix
* @param query_width number of query points ; width of the matrix
* @param height dimension of points ; height of the matrices
 * @param k        number of neighbors to consider
* @param dist_host distances to k nearest neighbors ; pointer to linear
* matrix
 * @param ind_host     indexes of the k nearest neighbors ; pointer to linear
* matrix
*
*/
void knn_cuda(float *ref_host, int ref_width, float *query_host,
int query_width, int height, int k, float *dist_host,
int *ind_host) {
// Variables
// CUDA Initialisation
cuInit(0);
  // Grids and threads
dim3 g_16x16(query_width / 16, ref_width / 16, 1);
dim3 t_16x16(16, 16, 1);
if (query_width % 16 != 0)
g_16x16.x += 1;
if (ref_width % 16 != 0)
g_16x16.y += 1;
//
dim3 g_256x1(query_width / 256, 1, 1);
dim3 t_256x1(256, 1, 1);
if (query_width % 256 != 0)
g_256x1.x += 1;
dim3 g_k_16x16(query_width / 16, k / 16, 1);
dim3 t_k_16x16(16, 16, 1);
if (query_width % 16 != 0)
g_k_16x16.x += 1;
if (k % 16 != 0)
g_k_16x16.y += 1;
// Kernel 1: Compute all the distances
cuComputeDistanceGlobal<<<g_16x16, t_16x16>>>(ref_host, ref_width, query_host,
query_width, height, dist_host);
// Kernel 2: Sort each column
cuInsertionSort<<<g_256x1, t_256x1>>>(dist_host, ind_host, query_width,
ref_width, k);
// Kernel 3: Compute square root of k first elements
cuParallelSqrt<<<g_k_16x16, t_k_16x16>>>(dist_host, query_width, k);
cudaDeviceSynchronize();
}
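// Usage note (inferred from the kernels above rather than stated by the
// original API docs): ref_host and query_host are expected in feature-major
// layout (height x ref_width and height x query_width), dist_host must hold
// query_width * ref_width floats, and all four buffers must be accessible from
// the device -- e.g. allocated with cudaMallocManaged as done in main() below.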
float compute_distance(const float *ref, int ref_nb, const float *query,
int query_nb, int dim, int ref_index, int query_index) {
float sum = 0.f;
for (int d = 0; d < dim; ++d) {
const float diff =
ref[d * ref_nb + ref_index] - query[d * query_nb + query_index];
sum += diff * diff;
}
return sqrtf(sum);
}
void modified_insertion_sort(float *dist, int *index, int length, int k) {
// Initialise the first index
index[0] = 0;
// Go through all points
for (int i = 1; i < length; ++i) {
// Store current distance and associated index
float curr_dist = dist[i];
int curr_index = i;
    // Skip the current value if its index is >= k and if it's higher than the
    // k-th smallest value already sorted
if (i >= k && curr_dist >= dist[k - 1]) {
continue;
}
    // Shift values (and indexes) higher than the current distance to the right
int j = min(i, k - 1);
while (j > 0 && dist[j - 1] > curr_dist) {
dist[j] = dist[j - 1];
index[j] = index[j - 1];
--j;
}
// Write the current distance and index at their position
dist[j] = curr_dist;
index[j] = curr_index;
}
}
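// Worked example for the function above: with k = 2 and dist = {5, 3, 4, 1},
// the loop leaves dist = {1, 3, ...} and index = {3, 1, ...}, i.e. the k
// smallest distances in ascending order together with their original
// positions; entries past the first k are not meaningful afterwards.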
bool knn_c(const float *ref, int ref_nb, const float *query, int query_nb,
int dim, int k, float *knn_dist, int *knn_index) {
// Allocate local array to store all the distances / indexes for a given query
// point
float *dist = (float *)malloc(ref_nb * sizeof(float));
int *index = (int *)malloc(ref_nb * sizeof(int));
// Allocation checks
if (!dist || !index) {
printf("Memory allocation error\n");
free(dist);
free(index);
return false;
}
  // Process one query point at a time
for (int i = 0; i < query_nb; ++i) {
// Compute all distances / indexes
for (int j = 0; j < ref_nb; ++j) {
dist[j] = compute_distance(ref, ref_nb, query, query_nb, dim, j, i);
index[j] = j;
}
// Sort distances / indexes
modified_insertion_sort(dist, index, ref_nb, k);
// Copy k smallest distances and their associated index
for (int j = 0; j < k; ++j) {
knn_dist[j * query_nb + i] = dist[j];
knn_index[j * query_nb + i] = index[j];
}
}
// Memory clean-up
free(dist);
free(index);
return true;
}
/**
* Example of use of kNN search CUDA.
*/
int main(void) {
// Variables and parameters
float *ref; // Pointer to reference point array
float *query; // Pointer to query point array
float *dist, *dist_c; // Pointer to distance array
int *ind, *ind_c; // Pointer to index array
int ref_nb = 4096; // Reference point number, max=65535
int query_nb = 4096; // Query point number, max=65535
int dim = 32; // Dimension of points
int k = 20; // Nearest neighbors to consider
int iterations = 100;
int c_iterations = 10;
int i;
const float precision = 0.001f; // distance error max
int nb_correct_precisions = 0;
int nb_correct_indexes = 0;
float *knn_dist = (float *)malloc(query_nb * k * sizeof(float));
int *knn_index = (int *)malloc(query_nb * k * sizeof(int));
// Memory allocation
// ref = (float *)malloc(ref_nb * dim * sizeof(float));
// query = (float *)malloc(query_nb * dim * sizeof(float));
cudaMallocManaged(&ref, ref_nb * dim * sizeof(float));
cudaMallocManaged(&query, query_nb * dim * sizeof(float));
cudaMallocManaged(&dist, query_nb * ref_nb * sizeof(float));
cudaMallocManaged(&ind, query_nb * k * sizeof(int));
dist_c = (float *)malloc(query_nb * k * sizeof(float));
  ind_c = (int *)malloc(query_nb * k * sizeof(int));
// Init
srand(time(NULL));
for (i = 0; i < ref_nb * dim; i++)
ref[i] = (float)rand() / (float)RAND_MAX;
for (i = 0; i < query_nb * dim; i++)
query[i] = (float)rand() / (float)RAND_MAX;
printf("Ground truth computation in progress...\n\n");
if (!knn_c(ref, ref_nb, query, query_nb, dim, k, knn_dist, knn_index)) {
free(knn_dist);
free(knn_index);
return EXIT_FAILURE;
}
// Variables for duration evaluation
cudaEvent_t start, stop;
cudaEventCreate(&start);
cudaEventCreate(&stop);
float elapsed_time;
  // Display information
printf("Number of reference points : %6d\n", ref_nb);
printf("Number of query points : %6d\n", query_nb);
printf("Dimension of points : %4d\n", dim);
printf("Number of neighbors to consider : %4d\n", k);
printf("Processing kNN search :\n");
printf("On CPU: \n");
struct timeval tic;
gettimeofday(&tic, NULL);
for (i = 0; i < c_iterations; i++) {
knn_c(ref, ref_nb, query, query_nb, dim, k, dist_c, ind_c);
}
for (int i = 0; i < query_nb * k; ++i) {
if (fabs(dist_c[i] - knn_dist[i]) <= precision) {
nb_correct_precisions++;
}
if (ind_c[i] == knn_index[i]) {
nb_correct_indexes++;
}
}
struct timeval toc;
gettimeofday(&toc, NULL);
elapsed_time = toc.tv_sec - tic.tv_sec;
elapsed_time += (toc.tv_usec - tic.tv_usec) / 1000000.;
float precision_accuracy = nb_correct_precisions / ((float)query_nb * k);
float index_accuracy = nb_correct_indexes / ((float)query_nb * k);
printf("%f, %f\n", precision_accuracy, index_accuracy);
printf(" done in %f s for %d iterations (%f s by iteration)\n", elapsed_time,
c_iterations, elapsed_time / (c_iterations));
printf("on GPU: \n");
// Call kNN search CUDA
cudaEventRecord(start, 0);
for (i = 0; i < iterations; i++) {
knn_cuda(ref, ref_nb, query, query_nb, dim, k, dist, ind);
}
nb_correct_precisions = 0;
nb_correct_indexes = 0;
for (int i = 0; i < query_nb * k; ++i) {
if (fabs(dist[i] - knn_dist[i]) <= precision) {
nb_correct_precisions++;
}
if (ind[i] == knn_index[i]) {
nb_correct_indexes++;
}
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&elapsed_time, start, stop);
precision_accuracy = nb_correct_precisions / ((float)query_nb * k);
index_accuracy = nb_correct_indexes / ((float)query_nb * k);
printf("%f, %f\n", precision_accuracy, index_accuracy);
printf(" done in %f s for %d iterations (%f s by iteration)\n",
elapsed_time / 1000, iterations, elapsed_time / (iterations * 1000));
// Destroy cuda event object and free memory
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaFree(ind);
cudaFree(dist);
cudaFree(query);
cudaFree(ref);
free(dist_c);
free(ind_c);
} |
126b592ffcea9da0e8f2c4ea84df915cab6aa99e.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <fstream>
#include <string.h>
#include <stdio.h>
#include <vector>
#include <time.h>
#include <nvmatrix.cuh>
#include <cudaconv2.cuh>
#include "routines.cuh"
#include <layer_kernels.cuh>
using namespace std;
extern LayerOpt opt1, opt2, opt3, opt4, optTop;
extern FILE* pFile;
//void assignOpt();
// test blocks
void testSamplePatches() {
printf("testing samplePatches()...\n");
int patchSize = 16;
int numPatches = 10000;
int dims[3] = {32, 32, 3};
int numRecords = 1000;
char* in_name = "/scratch0/qwang37/cifar-10-batches-bin/cifar_normalized.bin";
char* out_name1 = "/scratch0/qwang37/cifar-10-batches-bin/cifar_samplePatch_test_half.bin";
samplePatches(patchSize, numPatches, dims, numRecords, in_name, out_name1);
patchSize = 32;
char* out_name2 = "/scratch0/qwang37/cifar-10-batches-bin/cifar_samplePatch_test_full.bin";
samplePatches(patchSize, numPatches, dims, numRecords, in_name, out_name2);
printf("samplePatches() test done!\n");
}
void testMult() {
printf("rightMult test\n");
MTYPE data1[6] = {0,1,2,3,4,5};
Matrix m1(data1, 2, 3, false);
NVMatrix nvm1(m1, true);
MTYPE data2[9] = {0,1,2,3,4,5,6,7,8};
Matrix m2(data2, 3, 3, false);
NVMatrix nvm2(m2, true);
NVMatrix nvm3;
nvm1.rightMult(nvm2,1);
nvm1.printShape("nvm1");
printf("nvm1 _isTrans = %d\n", nvm1.isTrans());
nvm1.print(nvm1.getNumRows(), nvm1.getNumCols());
}
void testConv() {
printf("convolution test\n");
MTYPE* data = (MTYPE*) malloc(8*3*3*2*sizeof(MTYPE));
for (int i = 1; i <= 8*3*3*2; i++) {
		data[i-1] = i; // fill both test images with the running values 1, 2, 3, ...
}
	Matrix *im = new Matrix(data, 8*3*3, 2, true); // the transpose specification does not have an effect here!
NVMatrix *nvim = new NVMatrix(*im, true);
printf("filters\n");
MTYPE* data2 = (MTYPE*) malloc(32*4*2*sizeof(MTYPE));
for (int i = 1; i <= 32*4*2; i++) {
		data2[i-1] = i; // fill the test filters with the running values 1, 2, 3, ...
}
Matrix *f = new Matrix(data2, 32*4, 2, true);
NVMatrix *nvf = new NVMatrix(*f, true);
NVMatrix *targets = new NVMatrix();
convWeightActs(*nvim, *nvf, *targets, 3, 2, 2, 2, 0, 1, 8, 2, 0);
printf("numRows: %d, numCols: %d\n", targets->getNumRows(), targets->getNumCols());
targets->print(targets->getNumRows(), targets->getNumCols());
}
void testMatrixIO() {
printf("testing Matrix IO...\n");
MTYPE data[8] = {0, 1, 2, 3, 4, 5, 6, 7};
Matrix rm(data, 2, 4, true);
Matrix cm(data, 4, 2, false);
NVMatrix NVrm(rm, true);
NVMatrix NVcm(cm, true);
NVSaveToFile(NVrm, "NVrm.bin");
NVSaveToFile(NVcm, "NVcm.bin");
/* attention: the Matrix and NVMatrix classes do not
have proper direct copying assignment operators!
assignment has to be done through reference
*/
NVMatrix NVrm1(2,4), NVcm1(4,2);
NVrm1.setTrans(true); NVcm1.setTrans(false);
NVReadFromFile(NVrm1, "NVrm.bin");
NVReadFromFile(NVcm1, "NVcm.bin");
NVrm1.printShape("NVrm1");
NVrm1.print(NVrm1.getNumRows(), NVrm1.getNumCols());
NVcm1.printShape("NVcm1");
NVcm1.print(NVcm1.getNumRows(), NVcm1.getNumCols());
printf("Matrix IO test complete!\n");
}
void testDataIO() {
printf("testing Data IO...\n");
MTYPE data[8] = {0, 1, 2, 3, 4, 5, 6, 7};
Matrix rm(data, 2, 4, true);
Matrix cm(data, 4, 2, false);
NVMatrix NVrm(rm, true);
NVMatrix NVcm(cm, true);
NVSaveToFile(NVrm, "NVm.bin", true);
NVSaveToFile(NVcm, "NVm.bin", true);
/* attention: the Matrix and NVMatrix classes do not
have proper direct copying assignment operators!
assignment has to be done through reference
*/
NVMatrix NVrm1(2,4), NVcm1(4,2);
NVrm1.setTrans(true); NVcm1.setTrans(false);
NVReadFromFile(NVrm1, "NVm.bin");
NVReadFromFile(NVcm1, "NVm.bin", 2);
NVrm1.printShape("NVrm1");
NVrm1.print(NVrm1.getNumRows(), NVrm1.getNumCols());
NVcm1.printShape("NVcm1");
NVcm1.print(NVcm1.getNumRows(), NVcm1.getNumCols());
printf("Data IO test complete!\n");
}
/*
void testTrainFCAE() {
printf("testing trainFCAE()...\n");
LayerOpt opt1;
char* layerName = "layer1";
char* df = "/scratch0/qwang37/cifar-10-batches-bin/cifar_patches.bin";
opt1.layerName = layerName;
opt1.dataFile = df;
opt1.patchSize = 5;
opt1.numChannels = 3;
opt1.numFilters = 64;
opt1.batchSize = 2000;
opt1.batchNum = 1000;
opt1.numEpochs = 100;
opt1.initstv = 0.01;
opt1.mom = 0.0;
opt1.lrW = 0.01;
opt1.lrB = 0.01;
opt1.weightDecay = 0.003;
opt1.sparseParam = 0.035;
opt1.sparseWeight = 0.0;
NVMatrix weight1; NVMatrix bias1; // parameters for the first layer
char* weightFile ="/scratch0/qwang37/cifar-10-batches-bin/filters5x5_layer1.bin";
char* biasFile = "/scratch0/qwang37/cifar-10-batches-bin/biases5x5_layer1.bin";
trainFCAE(opt1, weight1, bias1, weightFile, biasFile);
//weight1.printShape("forward weight");
//weight1.print(weight1.getNumRows(), weight1.getNumCols());
printf("trainFCAE() test complete!\n");
}
*/
/*
void testGenerateDataConv(char* poolType) {
printf("testing testGenerateDataConv(%s)...\n", poolType);
Dim dims;
dims.dataX = 32; dims.dataY = 32; dims.dataC = 3; dims.batchSize = 5000; dims.numBatches = 10;
dims.filterX = 5; dims.numFilters = 64; dims.stride = 1; dims.padding = 0;
dims.poolSize = 3; dims.poolStride = 2; dims.poolStartX = 0; strcpy(dims.pooler, poolType);
dims.poolOutX = (dims.dataX - dims.filterX + 1 - dims.poolSize) / dims.poolStride + 1;
char* sourceFile = "/scratch0/qwang37/cifar-10-batches-bin/cifar_normalized.bin";
char* destFile = (char*) malloc (100);
strcpy(destFile, "/scratch0/qwang37/cifar-10-batches-bin/cifar_layer2data_conv_");
strcat(destFile, poolType);
strcat(destFile, ".bin");
remove(destFile);
char* layerType = "conv";
NVMatrix weight(dims.filterX*dims.filterX*dims.dataC, dims.numFilters), biases(1, dims.numFilters);
NVReadFromFile(weight, "/scratch0/qwang37/cifar-10-batches-bin/filters5x5_layer1.bin");
NVReadFromFile(biases, "/scratch0/qwang37/cifar-10-batches-bin/biases5x5_layer1.bin");
generateData(sourceFile, destFile, layerType, weight, biases, dims);
printf("testGenerateDataConv() test complete!\n");
}
*/
/*
void testGenerateDataFC() {
printf("testing testGenerateDataFC()...\n");
Dim dims;
dims.dataX = 5; dims.dataY = 5; dims.dataC = 3; dims.batchSize = 100000; dims.numBatches = 5;
dims.filterX = 5; dims.numFilters = 64; dims.stride = 1; dims.padding = 0;
char* sourceFile = "/scratch0/qwang37/cifar-10-batches-bin/cifar_patches.bin";
char* destFile = "/scratch0/qwang37/cifar-10-batches-bin/cifar_layer2data_FC.bin";
char* layerType = "FC";
NVMatrix weight(dims.filterX*dims.filterX*dims.dataC, dims.numFilters), biases(1, dims.numFilters);
NVReadFromFile(weight, "/scratch0/qwang37/cifar-10-batches-bin/filters5x5_layer1.bin");
NVReadFromFile(biases, "/scratch0/qwang37/cifar-10-batches-bin/biases5x5_layer1.bin");
remove(destFile);
generateData(sourceFile, destFile, layerType, weight, biases, dims);
printf("testGenerateDataFC() test complete!\n");
}
*/
void testNVLabelReadFromFile() {
printf("testing NVLabelReadFromFile()...\n");
NVMatrix labels(10,10);
labels.setTrans(false);
NVLabelReadFromFile(labels, "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", 101);
labels.printShape("labels");
labels.print(10,10);
printf("NVLabelReadFromFile() test complete!\n");
}
void testNVRawLabelReadFromFile() {
printf("testing NVRawLabelReadFromFile()...\n");
NVMatrix labels(1,10);
NVRawLabelReadFromFile(labels, "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", 101);
labels.printShape("labels");
labels.print(1,10);
printf("NVRawLabelReadFromFile() test complete!\n");
}
void finetune_rnorm() {
////assignOpt();
printf("starting finetune_rnorm()!\n");
fprintf(pFile, "starting finetune_rnorm!\n");
// initialize cublas
hipSetDevice(cutGetMaxGflopsDeviceId());
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
hipblasInit();
// data and parameters storage
NVMatrix act1, act1Pool, act1PoolNorm, act1Denom;
NVMatrix act2, act2Norm, act2NormPool, act2Denom;
NVMatrix act3;
NVMatrix act4;
NVMatrix actTop;
NVMatrix act1Grad, act1PoolGrad, act1PoolNormGrad;
NVMatrix act2Grad, act2NormGrad, act2NormPoolGrad;
NVMatrix act3Grad;
NVMatrix act4Grad;
NVMatrix actTopGrad;
NVMatrix weight1, weight2, weight3, weight4, weightTop;
NVMatrix weight1Grad, weight2Grad, weight3Grad, weight4Grad, weightTopGrad;
NVMatrix weight1Inc, weight2Inc, weight3Inc, weight4Inc, weightTopInc;
NVMatrix weight1GradTmp, weight2GradTmp, weight3GradTmp, weight4GradTmp, weightTopGradTmp;
NVMatrix bias1, bias2, bias3, bias4, biasTop; // bias4 is just an all-zero dummy vector
NVMatrix bias1Grad, bias2Grad, bias3Grad, bias4Grad, biasTopGrad;
NVMatrix bias1Inc, bias2Inc, bias3Inc, bias4Inc, biasTopInc;
// initialize parameters
if (opt1.loadParam) {
weight1.resize(opt1.numVis, opt1.numFilters);
weight2.resize(opt2.numVis, opt2.numFilters);
weight3.resize(opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters);
weight4.resize(opt4.numVis * opt4.outX * opt4.outX, opt4.numFilters);
weightTop.resize(optTop.numVis, optTop.numFilters);
bias1.resize(opt1.numFilters, 1);
bias2.resize(opt2.numFilters, 1);
bias3.resize(opt3.numFilters * opt3.outX * opt3.outX, 1);
bias4.resize(opt4.numFilters * opt4.outX * opt4.outX, 1);
biasTop.resize(1, optTop.numFilters);
biasTop.setTrans(true);
NVReadFromFile(weight1, "/scratch0/qwang37/cifar-10-batches-bin/weight1.bin");
NVReadFromFile(weight2, "/scratch0/qwang37/cifar-10-batches-bin/weight2.bin");
NVReadFromFile(weight3, "/scratch0/qwang37/cifar-10-batches-bin/weight3.bin");
NVReadFromFile(weight4, "/scratch0/qwang37/cifar-10-batches-bin/weight4.bin");
NVReadFromFile(weightTop, "/scratch0/qwang37/cifar-10-batches-bin/weightTop.bin");
NVReadFromFile(bias1, "/scratch0/qwang37/cifar-10-batches-bin/bias1.bin");
NVReadFromFile(bias2, "/scratch0/qwang37/cifar-10-batches-bin/bias2.bin");
NVReadFromFile(bias3, "/scratch0/qwang37/cifar-10-batches-bin/bias3.bin");
NVReadFromFile(bias4, "/scratch0/qwang37/cifar-10-batches-bin/bias4.bin");
NVReadFromFile(biasTop, "/scratch0/qwang37/cifar-10-batches-bin/biasTop.bin");
}
else {
initWeights(weight1, opt1.numVis, opt1.numFilters, false, opt1.initstv);
initWeights(weight2, opt2.numVis, opt2.numFilters, false, opt2.initstv);
initWeights(weight3, opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters, false, opt3.initstv);
initWeights(weight4, opt4.numVis * opt4.outX * opt4.outX, opt4.numFilters, false, opt4.initstv);
initWeights(weightTop, optTop.numVis, optTop.numFilters, false, optTop.initstv);
initWeights(bias1, opt1.numFilters, 1, false, 0.0);
initWeights(bias2, opt2.numFilters, 1, false, 0.0);
initWeights(bias3, opt3.numFilters * opt3.outX * opt3.outX, 1, false, 0.0);
initWeights(bias4, opt4.numFilters * opt4.outX * opt4.outX, 1, false, 0.0);
initWeights(biasTop, 1, optTop.numFilters, true, 0.0);
}
initWeights(weight1Inc, opt1.numVis, opt1.numFilters, false, 0.0); initWeights(weight1Grad, opt1.numVis, opt1.numFilters, false, 0.0);
initWeights(weight2Inc, opt2.numVis, opt2.numFilters, false, 0.0); initWeights(weight2Grad, opt2.numVis, opt2.numFilters, false, 0.0);
initWeights(weight3Inc, opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters, false, 0.0); initWeights(weight3Grad, opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters, false, 0.0); // not useful for 3 and 4
initWeights(weight4Inc, opt4.numVis * opt4.outX * opt4.outX, opt4.numFilters, false, 0.0); initWeights(weight4Grad, opt4.numVis * opt4.outX * opt4.outX, opt4.numFilters, false, 0.0);
initWeights(weightTopInc, optTop.numVis, optTop.numFilters, false, 0.0); initWeights(weightTopGrad, optTop.numVis, optTop.numFilters, false, 0.0);
initWeights(bias1Inc, opt1.numFilters, 1, false, 0.0); initWeights(bias1Grad, opt1.numFilters, 1, false, 0.0);
initWeights(bias2Inc, opt2.numFilters, 1, false, 0.0); initWeights(bias2Grad, opt2.numFilters, 1, false, 0.0);
initWeights(bias3Inc, opt3.numFilters * opt3.outX * opt3.outX, 1, false, 0.0); initWeights(bias3Grad, opt3.numFilters * opt3.outX * opt3.outX, 1, false, 0.0); // not useful for 3
initWeights(bias4Inc, opt4.numFilters * opt4.outX * opt4.outX, 1, false, 0.0); initWeights(bias4Grad, opt4.numFilters * opt4.outX * opt4.outX, 1, false, 0.0);
initWeights(biasTopInc, 1, opt1.labelSize, true, 0.0); initWeights(biasTopGrad, 1, opt1.labelSize, true, 0.0);
// read data to host memory (and labels to the GPU memory)
int imPixels = 32*32*opt1.numChannels;
int batchSize = opt1.batchSize;
int trainBatchNum = opt1.numTrain / batchSize;
int testBatchNum = opt1.numTest / batchSize;
vector<Matrix*> CPUTrain(trainBatchNum), CPUTest(testBatchNum);
vector<NVMatrix*> GPUTrain(trainBatchNum), GPUTest(testBatchNum);
vector<NVMatrix*> GPURawLabelTrain(trainBatchNum), GPURawLabelTest(testBatchNum);
for (int batch = 0; batch < trainBatchNum; batch++) {
CPUTrain[batch] = new Matrix(imPixels, batchSize);
CPUTrain[batch]->setTrans(false);
GPUTrain[batch] = new NVMatrix();
hmReadFromFile(*CPUTrain[batch], opt1.dataPath + "/cifar_raw.bin", batch*batchSize);
GPURawLabelTrain[batch] = new NVMatrix(1, batchSize);
GPURawLabelTrain[batch]->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTrain[batch], opt1.dataPath + "/cifar_labels.bin", batch*batchSize);
}
batchSize = opt1.numTrain % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTrain.push_back(new Matrix(imPixels, batchSize));
CPUTrain.back()->setTrans(false);
GPUTrain.push_back(new NVMatrix());
hmReadFromFile(*CPUTrain.back(), opt1.dataPath + "/cifar_raw.bin", trainBatchNum*batchSize);
GPURawLabelTrain.push_back(new NVMatrix(1, batchSize));
GPURawLabelTrain.back()->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTrain.back(), opt1.dataPath + "/cifar_labels.bin", trainBatchNum*batchSize);
}
// test set
batchSize = opt1.batchSize;
for (int batch = 0; batch < testBatchNum; batch++) {
CPUTest[batch] = new Matrix(imPixels, batchSize);
CPUTest[batch]->setTrans(false);
GPUTest[batch] = new NVMatrix();
hmReadFromFile(*CPUTest[batch], opt1.dataPath + "/cifar_raw.bin", opt1.numTrain+batch*batchSize);
GPURawLabelTest[batch] = new NVMatrix(1, batchSize);
GPURawLabelTest[batch]->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest[batch], opt1.dataPath + "/cifar_labels.bin", opt1.numTrain+batch*batchSize);
}
batchSize = opt1.numTest % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTest.push_back(new Matrix(imPixels, batchSize));
CPUTest.back()->setTrans(false);
GPUTest.push_back(new NVMatrix());
hmReadFromFile(*CPUTest.back(), opt1.dataPath + "/cifar_raw.bin", opt1.numTrain+testBatchNum*batchSize);
GPURawLabelTest.push_back(new NVMatrix(1, batchSize));
GPURawLabelTest.back()->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest.back(), opt1.dataPath + "/cifar_labels.bin", opt1.numTrain+testBatchNum*batchSize);
}
NVMatrix trueLabelLogProbs;
NVMatrix correctProbs;
MTYPE cost; // as before, we trace the performance using the cost variable
MTYPE cost1;
NVMatrix absM;
MTYPE weightAbs1, weightAbs2, weightAbs3, weightAbs4, weightAbsTop;
MTYPE biasAbs1, biasAbs2, biasAbs3, biasAbs4, biasAbsTop;
MTYPE weightGradAbs1, weightGradAbs2, weightGradAbs3, weightGradAbs4, weightGradAbsTop;
MTYPE biasGradAbs1, biasGradAbs2, biasGradAbs3, biasGradAbs4, biasGradAbsTop;
clock_t startClock;
clock_t tick;
float lr_scale = 1.0, mom_scale = 1.0;
cropDataProvider(CPUTest, GPUTest, opt1, true, opt1.whitened); // test data is fixed
for (int epoch = 0; epoch < opt1.numEpochs; epoch++) {
cost = 0;
cost1 = 0;
cropDataProvider(CPUTrain, GPUTrain, opt1, false, opt1.whitened); // copy data to the GPU side
hipDeviceSynchronize();
startClock = clock();
for (int batch = 0; batch < GPUTrain.size(); batch++) {
batchSize = GPUTrain[batch]->getNumCols();
// ====forward pass====
// 0->1
//cout << "0->1\n";
//original
activateConv(*GPUTrain[batch], act1, weight1, bias1, opt1);
act1.apply(ReluOperator());
act1Pool.transpose(false);
convLocalPool(act1, act1Pool, opt1.numFilters, opt1.poolSize, opt1.poolStartX, opt1.poolStride, opt1.poolOutX, MaxPooler());
convResponseNormCrossMap(act1Pool, act1Denom, act1PoolNorm, opt1.numFilters, opt1.sizeF, opt1.addScale/opt1.sizeF, opt1.powScale, false);
// 1->2
//cout << "1->2\n";
//original
activateConv(act1PoolNorm, act2, weight2, bias2, opt2);
act2.apply(ReluOperator());
convResponseNormCrossMap(act2, act2Denom, act2Norm, opt2.numFilters, opt2.sizeF, opt2.addScale/opt2.sizeF, opt2.powScale, false);
act2NormPool.transpose(false);
convLocalPool(act2Norm, act2NormPool, opt2.numFilters, opt2.poolSize, opt2.poolStartX, opt2.poolStride, opt2.poolOutX, MaxPooler());
// 2->3
//cout << "2->3\n";
// original
activateLocal(act2NormPool, act3, weight3, bias3, opt3);
act3.apply(ReluOperator());
// 3->4
//cout << "3->4\n";
// original
activateLocal(act3, act4, weight4, bias4, opt4);
act4.apply(ReluOperator());
// 4->top
//cout << "4->top\n";
actTop.transpose(true);
actTop.resize(batchSize, opt1.labelSize);
activate(act4, actTop, weightTop, biasTop, 0, 1);
//softmax layer
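			// (the max is subtracted before exponentiation for numerical stability;
			// max() and sum() return newly allocated matrices, hence the deletes below)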
NVMatrix& max = actTop.max(1);
actTop.addVector(max, -1);
actTop.apply(NVMatrixOps::Exp());
NVMatrix& sum = actTop.sum(1);
actTop.eltwiseDivideByVector(sum);
delete &max;
			delete &sum;
// compute cost
computeLogregSoftmaxGrad(*GPURawLabelTrain[batch], actTop, actTopGrad, false, 1);
actTop.transpose(false);
computeLogregCost(*GPURawLabelTrain[batch], actTop, trueLabelLogProbs, correctProbs); //labelLogProbs:(1, numCases); correctProbs:(1, numCases)
cost += correctProbs.sum() / batchSize;
cost1 += trueLabelLogProbs.sum() / batchSize;
// ====== back pass ======
// top -> 4, 3, 2, 1
//cout << "top -> 4, 3, 2, 1";
// weight update
NVMatrix& act4T = act4.getTranspose();
weightTopGrad.addProduct(act4T, actTopGrad, 0, 1);
biasTopGrad.addSum(actTopGrad, 0, 0, 1);
delete &act4T;
// bp
actTopGrad.transpose(true);
NVMatrix& weightTopT = weightTop.getTranspose();
act4Grad.addProduct(actTopGrad, weightTopT, 0, 1);
delete &weightTopT;
// 4->3
//cout << "4->3\n";
act4Grad.transpose(false); // convert back to row-major
act4.transpose(false);
act4Grad.applyBinary(ReluGradientOperator(), act4);
localWeightActs(act3, act4Grad, weight4Grad, opt4.imSize, opt4.outX, opt4.outX, opt4.patchSize, opt4.paddingStart, 1, opt4.numChannels, 1);
bias4Grad.addSum(act4Grad, 1, 0, 1);
localImgActs(act4Grad, weight4, act3Grad, opt4.imSize, opt4.imSize, opt4.outX, opt4.paddingStart, 1, opt4.numChannels, 1);
// 3->2
//cout << "3->2\n";
// original part
act3Grad.transpose(false); // convert back to row-major
act3.transpose(false);
act3Grad.applyBinary(ReluGradientOperator(), act3);
localWeightActs(act2NormPool, act3Grad, weight3Grad, opt3.imSize, opt3.outX, opt3.outX, opt3.patchSize, opt3.paddingStart, 1, opt3.numChannels, 1);
bias3Grad.addSum(act3Grad, 1, 0, 1);
localImgActs(act3Grad, weight3, act2NormPoolGrad, opt3.imSize, opt3.imSize, opt3.outX, opt3.paddingStart, 1, opt3.numChannels, 1);
// 2->1
//cout << "2->1\n";
// original part
act2NormPoolGrad.transpose(false);
act2NormPool.transpose(false);
convLocalMaxUndo(act2Norm, act2NormPoolGrad, act2NormPool, act2NormGrad, opt2.poolSize, opt2.poolStartX, opt2.poolStride, opt2.poolOutX);
convResponseNormCrossMapUndo(act2NormGrad, act2Denom, act2, act2Norm, act2Grad, opt2.numFilters, opt2.sizeF, opt2.addScale/opt2.sizeF, opt2.powScale, false, 0, 1);
act2Grad.applyBinary(ReluGradientOperator(), act2);
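			// convWeightActs emits one partial weight gradient per group of partialSum
			// output locations; the reshape + addSum below collapse those partial sums
			// into a single (patchSize*patchSize*numChannels) x numFilters gradient.
			// The same pattern is repeated for layer 1 further down.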
convWeightActs(act1PoolNorm, act2Grad, weight2GradTmp, opt2.imSize, opt2.outX, opt2.outX, opt2.patchSize, opt2.paddingStart, 1, opt2.numChannels, 1, opt2.partialSum);
weight2GradTmp.reshape(opt2.outX * opt2.outX / opt2.partialSum, opt2.numChannels * opt2.patchSize * opt2.patchSize * opt2.numFilters);
weight2Grad.addSum(weight2GradTmp, 0, 0, 1);
weight2Grad.reshape(opt2.numChannels * opt2.patchSize * opt2.patchSize, opt2.numFilters);
act2Grad.reshape(opt2.numFilters, opt2.outX * opt2.outX * batchSize);
bias2Grad.addSum(act2Grad, 1, 0, 1);
act2Grad.reshape(opt2.numFilters * opt2.outX * opt2.outX, batchSize);
convImgActs(act2Grad, weight2, act1PoolNormGrad, opt2.imSize, opt2.imSize, opt2.outX, opt2.paddingStart, 1, opt2.numChannels, 1);
// 1->0
//cout << "1->0\n";
// original part
act1PoolNormGrad.transpose(false);
act1PoolNorm.transpose(false);
convResponseNormCrossMapUndo(act1PoolNormGrad, act1Denom, act1Pool, act1PoolNorm, act1PoolGrad, opt1.numFilters, opt1.sizeF, opt1.addScale/opt1.sizeF, opt1.powScale, false, 0, 1);
convLocalMaxUndo(act1, act1PoolGrad, act1Pool, act1Grad, opt1.poolSize, opt1.poolStartX, opt1.poolStride, opt1.poolOutX);
act1Grad.applyBinary(ReluGradientOperator(), act1);
convWeightActs(*GPUTrain[batch], act1Grad, weight1GradTmp, opt1.imSize, opt1.outX, opt1.outX, opt1.patchSize, opt1.paddingStart, 1, opt1.numChannels, 1, opt1.partialSum);
weight1GradTmp.reshape(opt1.outX * opt1.outX / opt1.partialSum, opt1.numChannels * opt1.patchSize * opt1.patchSize * opt1.numFilters);
weight1Grad.addSum(weight1GradTmp, 0, 0, 1);
weight1Grad.reshape(opt1.numChannels * opt1.patchSize * opt1.patchSize, opt1.numFilters);
act1Grad.reshape(opt1.numFilters, opt1.outX * opt1.outX * batchSize);
bias1Grad.addSum(act1Grad, 1, 0, 1);
act1Grad.reshape(opt1.numFilters * opt1.outX * opt1.outX, batchSize);
// update
lr_scale = lrDecay(lr_scale, opt1.lrDecayType, opt1.lrDecayFactor, opt1.lrMinRate);
mom_scale = momInc(mom_scale, opt1.momIncType, opt1.momIncFactor, opt1.momMaxRate);
updateWeight(weight1Grad, weight1Inc, weight1, opt1, batchSize, lr_scale, mom_scale);
updateWeight(weight2Grad, weight2Inc, weight2, opt2, batchSize, lr_scale, mom_scale);
updateWeight(weight3Grad, weight3Inc, weight3, opt3, batchSize, lr_scale, mom_scale);
updateWeight(weight4Grad, weight4Inc, weight4, opt4, batchSize, lr_scale, mom_scale);
updateWeight(weightTopGrad, weightTopInc, weightTop, optTop, batchSize, lr_scale, mom_scale);
updateBias(bias1Grad, bias1Inc, bias1, opt1, batchSize, lr_scale, mom_scale);
updateBias(bias2Grad, bias2Inc, bias2, opt2, batchSize, lr_scale, mom_scale);
updateBias(bias3Grad, bias3Inc, bias3, opt3, batchSize, lr_scale, mom_scale);
updateBias(bias4Grad, bias4Inc, bias4, opt4, batchSize, lr_scale, mom_scale);
updateBias(biasTopGrad, biasTopInc, biasTop, optTop, batchSize, lr_scale, mom_scale);
		} // for (int batch = 0; batch < GPUTrain.size(); batch++)
hipDeviceSynchronize();
cost /= CPUTrain.size();
cost1 /= CPUTrain.size();
printf("\nfinished epoch %d of %d; classify precision = %f; objective = %f; elapsed time = %f seconds\n", epoch, opt1.numEpochs,
cost, cost1, (float)(clock() - startClock)/CLOCKS_PER_SEC);
fprintf(pFile, "\nfinished epoch %d of %d; classify precision = %f; objective = %f; elapsed time = %f seconds\n", epoch, opt1.numEpochs,
cost, cost1, (float)(clock() - startClock)/CLOCKS_PER_SEC);
/*
weight1.apply(NVMatrixOps::Abs(), absM);
weightAbs1 = absM.sum() / absM.getNumElements();
weight2.apply(NVMatrixOps::Abs(), absM);
weightAbs2 = absM.sum() / absM.getNumElements();
weight3.apply(NVMatrixOps::Abs(), absM);
weightAbs3 = absM.sum() / absM.getNumElements();
weight4.apply(NVMatrixOps::Abs(), absM);
weightAbs4 = absM.sum() / absM.getNumElements();
weightTop.apply(NVMatrixOps::Abs(), absM);
weightAbsTop = absM.sum() / absM.getNumElements();
weight1Inc.apply(NVMatrixOps::Abs(), absM);
weightGradAbs1 = absM.sum() / absM.getNumElements();
weight2Inc.apply(NVMatrixOps::Abs(), absM);
weightGradAbs2 = absM.sum() / absM.getNumElements();
weight3Inc.apply(NVMatrixOps::Abs(), absM);
weightGradAbs3 = absM.sum() / absM.getNumElements();
weight4Inc.apply(NVMatrixOps::Abs(), absM);
weightGradAbs4 = absM.sum() / absM.getNumElements();
weightTopInc.apply(NVMatrixOps::Abs(), absM);
weightGradAbsTop = absM.sum() / absM.getNumElements();
bias1.apply(NVMatrixOps::Abs(), absM);
biasAbs1 = absM.sum() / absM.getNumElements();
bias2.apply(NVMatrixOps::Abs(), absM);
biasAbs2 = absM.sum() / absM.getNumElements();
bias3.apply(NVMatrixOps::Abs(), absM);
biasAbs3 = absM.sum() / absM.getNumElements();
bias4.apply(NVMatrixOps::Abs(), absM);
biasAbs4 = absM.sum() / absM.getNumElements();
biasTop.apply(NVMatrixOps::Abs(), absM);
biasAbsTop = absM.sum() / absM.getNumElements();
bias1Inc.apply(NVMatrixOps::Abs(), absM);
biasGradAbs1 = absM.sum() / absM.getNumElements();
bias2Inc.apply(NVMatrixOps::Abs(), absM);
biasGradAbs2 = absM.sum() / absM.getNumElements();
bias3Inc.apply(NVMatrixOps::Abs(), absM);
biasGradAbs3 = absM.sum() / absM.getNumElements();
bias4Inc.apply(NVMatrixOps::Abs(), absM);
biasGradAbs4 = absM.sum() / absM.getNumElements();
biasTopInc.apply(NVMatrixOps::Abs(), absM);
biasGradAbsTop = absM.sum() / absM.getNumElements();
printf("weight abs: 1--%f, 2--%f, 3--%f, 4--%f, top--%f\n", weightAbs1, weightAbs2, weightAbs3, weightAbs4, weightAbsTop);
printf("weight grad abs: 1--%f, 2--%f, 3--%f, 4--%f, top--%f\n", weightGradAbs1, weightGradAbs2, weightGradAbs3, weightGradAbs4, weightGradAbsTop);
printf("bias abs: 1--%f, 2--%f, 3--%f, 4--%f, top--%f\n", biasAbs1, biasAbs2, biasAbs3, biasAbs4, biasAbsTop);
printf("bias grad abs: 1--%f, 2--%f, 3--%f, 4--%f, top--%f\n", biasGradAbs1, biasGradAbs2, biasGradAbs3, biasGradAbs4, biasGradAbsTop);
fprintf(pFile, "weight abs: 1--%f, 2--%f, 3--%f, 4--%f, top--%f\n", weightAbs1, weightAbs2, weightAbs3, weightAbs4, weightAbsTop);
fprintf(pFile, "weight grad abs: 1--%f, 2--%f, 3--%f, 4--%f, top--%f\n", weightGradAbs1, weightGradAbs2, weightGradAbs3, weightGradAbs4, weightGradAbsTop);
fprintf(pFile, "bias abs: 1--%f, 2--%f, 3--%f, 4--%f, top--%f\n", biasAbs1, biasAbs2, biasAbs3, biasAbs4, biasAbsTop);
fprintf(pFile, "bias grad abs: 1--%f, 2--%f, 3--%f, 4--%f, top--%f\n", biasGradAbs1, biasGradAbs2, biasGradAbs3, biasGradAbs4, biasGradAbsTop);
*/
// process the test set every 3 epochs
if (epoch % 3 == 2) {
hipDeviceSynchronize();
startClock = clock();
cost = 0;
cost1 = 0;
for (int batch = 0; batch < GPUTest.size(); batch++) {
batchSize = GPUTest[batch]->getNumCols();
// ====forward pass====
// 0->1
//cout << "0->1\n";
//original
activateConv(*GPUTest[batch], act1, weight1, bias1, opt1);
act1.apply(ReluOperator());
act1Pool.transpose(false);
convLocalPool(act1, act1Pool, opt1.numFilters, opt1.poolSize, opt1.poolStartX, opt1.poolStride, opt1.poolOutX, MaxPooler());
convResponseNormCrossMap(act1Pool, act1Denom, act1PoolNorm, opt1.numFilters, opt1.sizeF, opt1.addScale/opt1.sizeF, opt1.powScale, false);
// 1->2
//cout << "1->2\n";
//original
activateConv(act1PoolNorm, act2, weight2, bias2, opt2);
act2.apply(ReluOperator());
convResponseNormCrossMap(act2, act2Denom, act2Norm, opt2.numFilters, opt2.sizeF, opt2.addScale/opt2.sizeF, opt2.powScale, false);
act2NormPool.transpose(false);
convLocalPool(act2Norm, act2NormPool, opt2.numFilters, opt2.poolSize, opt2.poolStartX, opt2.poolStride, opt2.poolOutX, MaxPooler());
// 2->3
//cout << "2->3\n";
// original
activateLocal(act2NormPool, act3, weight3, bias3, opt3);
act3.apply(ReluOperator());
// 3->4
//cout << "3->4\n";
// original
activateLocal(act3, act4, weight4, bias4, opt4);
act4.apply(ReluOperator());
// 4->top
//cout << "4->top\n";
actTop.transpose(true);
actTop.resize(batchSize, opt1.labelSize);
activate(act4, actTop, weightTop, biasTop, 0, 1);
//softmax layer
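// numerically stable softmax: subtract each case's max activation before exponentiating, then normalize by the per-case sum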
NVMatrix& max = actTop.max(1);
actTop.addVector(max, -1);
actTop.apply(NVMatrixOps::Exp());
NVMatrix& sum = actTop.sum(1);
actTop.eltwiseDivideByVector(sum);
delete &max;
delete &sum;
// compute cost
computeLogregSoftmaxGrad(*GPURawLabelTest[batch], actTop, actTopGrad, false, 1);
actTop.transpose(false);
computeLogregCost(*GPURawLabelTest[batch], actTop, trueLabelLogProbs, correctProbs); //labelLogProbs:(1, numCases); correctProbs:(1, numCases)
cost += correctProbs.sum() / batchSize;
cost1 += trueLabelLogProbs.sum() / batchSize;
} // for (int batch = 0; batch < GPUTest.size(); batch++)
hipDeviceSynchronize();
cost /= GPUTest.size();
cost1 /= GPUTest.size();
printf("\ntest set precision: %f\n; objective = %f; time elapsed = %f seconds\n", cost, cost1,
(float)(clock() - startClock)/CLOCKS_PER_SEC);
fprintf(pFile, "\ntest set precision: %f\n; objective = %f; time elapsed = %f seconds\n", cost, cost1,
(float)(clock() - startClock)/CLOCKS_PER_SEC);
// save checkpoint
char* weight1File = "/scratch0/qwang37/cifar-10-batches-bin/weight1.bin", *bias1File = "/scratch0/qwang37/cifar-10-batches-bin/bias1.bin";
char* weight2File = "/scratch0/qwang37/cifar-10-batches-bin/weight2.bin", *bias2File = "/scratch0/qwang37/cifar-10-batches-bin/bias2.bin";
char* weight3File = "/scratch0/qwang37/cifar-10-batches-bin/weight3.bin", *bias3File = "/scratch0/qwang37/cifar-10-batches-bin/bias3.bin";
char* weight4File = "/scratch0/qwang37/cifar-10-batches-bin/weight4.bin", *bias4File = "/scratch0/qwang37/cifar-10-batches-bin/bias4.bin";
char* weightTopFile = "/scratch0/qwang37/cifar-10-batches-bin/weightTop.bin", *biasTopFile = "/scratch0/qwang37/cifar-10-batches-bin/biasTop.bin";
NVSaveToFile(weight1, weight1File); NVSaveToFile(bias1, bias1File);
NVSaveToFile(weight2, weight2File); NVSaveToFile(bias2, bias2File);
NVSaveToFile(weight3, weight3File); NVSaveToFile(bias3, bias3File);
NVSaveToFile(weight4, weight4File); NVSaveToFile(bias4, bias4File);
NVSaveToFile(weightTop, weightTopFile); NVSaveToFile(biasTop, biasTopFile);
printf("Checkpoint saved!\n\n");
fprintf(pFile, "Checkpoint saved!\n\n");
} // if (epoch % 3 == 2)
} // for (int epoch = 0; epoch < opt1.numEpochs; epoch++)
printf("finetuning_rnorm() complete!\n");
fprintf(pFile, "finetuning_rnorm() complete!\n");
} // void finetune_rnorm()
void multiViewTest() {
////assignOpt();
printf("starting multiViewTest()!\n");
fprintf(pFile, "starting multiViewTest()!\n");
// initialize cublas
hipSetDevice(cutGetMaxGflopsDeviceId());
hipDeviceSetCacheConfig(hipFuncCachePreferShared);
hipblasInit();
// data and parameters storage
NVMatrix act1, act1Pool, act1PoolNorm, act1Denom;
NVMatrix act2, act2Norm, act2NormPool, act2Denom;
NVMatrix act3;
NVMatrix act4;
NVMatrix actTop;
NVMatrix softMaxAct;
NVMatrix weight1, weight2, weight3, weight4, weightTop;
NVMatrix bias1, bias2, bias3, bias4, biasTop; // bias4 is just an all-zero dummy vector
// initialize parameters
weight1.resize(opt1.numVis, opt1.numFilters);
weight2.resize(opt2.numVis, opt2.numFilters);
weight3.resize(opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters);
weight4.resize(opt4.numVis * opt4.outX * opt4.outX, opt4.numFilters);
weightTop.resize(optTop.numVis, optTop.numFilters);
bias1.resize(opt1.numFilters, 1);
bias2.resize(opt2.numFilters, 1);
bias3.resize(opt3.numFilters * opt3.outX * opt3.outX, 1);
bias4.resize(opt4.numFilters * opt4.outX * opt4.outX, 1);
biasTop.resize(1, optTop.numFilters);
biasTop.setTrans(true);
NVReadFromFile(weight1, "/scratch0/qwang37/cifar-10-batches-bin/weight1.bin");
NVReadFromFile(weight2, "/scratch0/qwang37/cifar-10-batches-bin/weight2.bin");
NVReadFromFile(weight3, "/scratch0/qwang37/cifar-10-batches-bin/weight3.bin");
NVReadFromFile(weight4, "/scratch0/qwang37/cifar-10-batches-bin/weight4.bin");
NVReadFromFile(weightTop, "/scratch0/qwang37/cifar-10-batches-bin/weightTop.bin");
NVReadFromFile(bias1, "/scratch0/qwang37/cifar-10-batches-bin/bias1.bin");
NVReadFromFile(bias2, "/scratch0/qwang37/cifar-10-batches-bin/bias2.bin");
NVReadFromFile(bias3, "/scratch0/qwang37/cifar-10-batches-bin/bias3.bin");
NVReadFromFile(bias4, "/scratch0/qwang37/cifar-10-batches-bin/bias4.bin");
NVReadFromFile(biasTop, "/scratch0/qwang37/cifar-10-batches-bin/biasTop.bin");
// read data to host memory (and labels to the GPU memory)
int imPixels = 32*32*opt1.numChannels;
int batchSize = opt1.batchSize;
int testBatchNum = opt1.numTest / batchSize;
vector<Matrix*> CPUTest(testBatchNum);
vector<NVMatrix*> GPUTest(testBatchNum*opt1.numViews);
vector<NVMatrix*> GPURawLabelTest(testBatchNum);
// test set
batchSize = opt1.batchSize;
for (int batch = 0; batch < testBatchNum; batch++) {
CPUTest[batch] = new Matrix(imPixels, batchSize);
CPUTest[batch]->setTrans(false);
for (int r = 0; r < opt1.numViews; r++)
GPUTest[batch*opt1.numViews+r] = new NVMatrix();
hmReadFromFile(*CPUTest[batch], opt1.dataPath + "/cifar_raw.bin", opt1.numTrain+batch*batchSize);
GPURawLabelTest[batch] = new NVMatrix(1, batchSize);
GPURawLabelTest[batch]->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest[batch], opt1.dataPath + "/cifar_labels.bin", opt1.numTrain+batch*batchSize);
}
batchSize = opt1.numTest % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTest.push_back(new Matrix(imPixels, batchSize));
CPUTest.back()->setTrans(false);
for (int r = 0; r < opt1.numViews; r++)
GPUTest.push_back(new NVMatrix());
hmReadFromFile(*CPUTest.back(), opt1.dataPath + "/cifar_raw.bin", opt1.numTrain+testBatchNum*batchSize);
GPURawLabelTest.push_back(new NVMatrix(1, batchSize));
GPURawLabelTest.back()->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest.back(), opt1.dataPath + "/cifar_labels.bin", opt1.numTrain+testBatchNum*batchSize);
}
multiViewDataProvider(CPUTest, GPUTest, opt1, opt1.numViews, opt1.whitened); // copy data to the GPU side
NVMatrix trueLabelLogProbs;
NVMatrix correctProbs;
MTYPE cost; // as before, we trace the performance using the cost variable
MTYPE cost1;
clock_t startClock;
clock_t tick;
cost = 0;
cost1 = 0;
hipDeviceSynchronize();
startClock = clock();
for (int batch = 0; batch < CPUTest.size(); batch++) {
batchSize = CPUTest[batch]->getNumCols();
for (int r = 0; r < opt1.numViews; r++) {
// ====forward pass====
// 0->1
//cout << "0->1\n";
//original
activateConv(*GPUTest[batch*opt1.numViews+r], act1, weight1, bias1, opt1);
act1.apply(ReluOperator());
act1Pool.transpose(false);
convLocalPool(act1, act1Pool, opt1.numFilters, opt1.poolSize, opt1.poolStartX, opt1.poolStride, opt1.poolOutX, MaxPooler());
convResponseNormCrossMap(act1Pool, act1Denom, act1PoolNorm, opt1.numFilters, opt1.sizeF, opt1.addScale/opt1.sizeF, opt1.powScale, false);
// 1->2
//cout << "1->2\n";
//original
activateConv(act1PoolNorm, act2, weight2, bias2, opt2);
act2.apply(ReluOperator());
convResponseNormCrossMap(act2, act2Denom, act2Norm, opt2.numFilters, opt2.sizeF, opt2.addScale/opt2.sizeF, opt2.powScale, false);
act2NormPool.transpose(false);
convLocalPool(act2Norm, act2NormPool, opt2.numFilters, opt2.poolSize, opt2.poolStartX, opt2.poolStride, opt2.poolOutX, MaxPooler());
// 2->3
//cout << "2->3\n";
// original
activateLocal(act2NormPool, act3, weight3, bias3, opt3);
act3.apply(ReluOperator());
// 3->4
//cout << "3->4\n";
// original
activateLocal(act3, act4, weight4, bias4, opt4);
act4.apply(ReluOperator());
// 4->top
//cout << "4->top\n";
actTop.transpose(true);
actTop.resize(batchSize, opt1.labelSize);
activate(act4, actTop, weightTop, biasTop, 0, 1);
//softmax layer
NVMatrix& max = actTop.max(1);
actTop.addVector(max, -1);
actTop.apply(NVMatrixOps::Exp());
NVMatrix& sum = actTop.sum(1);
actTop.eltwiseDivideByVector(sum);
delete &max;
delete &sum;
actTop.transpose(false);
if (r == 0)
actTop.copy(softMaxAct);
else
softMaxAct.add(actTop);
}// for (r = 0:9)
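// average the softmax outputs over the views; the 0.1 factor below assumes opt1.numViews == 10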
softMaxAct.scale(0.1);
computeLogregCost(*GPURawLabelTest[batch], softMaxAct, trueLabelLogProbs, correctProbs); //labelLogProbs:(1, numCases); correctProbs:(1, numCases)
cost += correctProbs.sum();
cost1 += trueLabelLogProbs.sum();
}//for (batches)
hipDeviceSynchronize();
cost /= opt1.numTest;
cost1 /= opt1.numTest;
printf("\ntest set precision: %f\n; objective = %f; time elapsed = %f seconds\n", cost, cost1,
(float)(clock() - startClock)/CLOCKS_PER_SEC);
printf("multiViewTest() complete!\n");
fprintf(pFile, "\ntest set precision: %f\n; objective = %f; time elapsed = %f seconds\n", cost, cost1,
(float)(clock() - startClock)/CLOCKS_PER_SEC);
fprintf(pFile, "multiViewTest() complete!\n");
} // void multiViewTest()
void testCropDataProvider() {
//assignOpt();
printf("starting test cropDataProvider()!\n");
// read data to host memory (and labels to the GPU memory)
int imPixels = 32*32*opt1.numChannels;
int batchSize = opt1.batchSize;
int trainBatchNum = opt1.numTrain / batchSize;
int testBatchNum = opt1.numTest / batchSize;
vector<Matrix*> CPUTrain(trainBatchNum), CPUTest(testBatchNum);
vector<NVMatrix*> GPUTrain(trainBatchNum), GPUTest(testBatchNum);
vector<NVMatrix*> GPURawLabelTrain(trainBatchNum), GPURawLabelTest(testBatchNum);
for (int batch = 0; batch < trainBatchNum; batch++) {
CPUTrain[batch] = new Matrix(imPixels, batchSize);
CPUTrain[batch]->setTrans(false);
GPUTrain[batch] = new NVMatrix();
hmReadFromFile(*CPUTrain[batch], "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", batch*batchSize);
GPURawLabelTrain[batch] = new NVMatrix(1, batchSize);
GPURawLabelTrain[batch]->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTrain[batch], "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", batch*batchSize);
}
batchSize = opt1.numTrain % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTrain.push_back(new Matrix(imPixels, batchSize));
CPUTrain.back()->setTrans(false);
GPUTrain.push_back(new NVMatrix());
hmReadFromFile(*CPUTrain.back(), "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", trainBatchNum*batchSize);
GPURawLabelTrain.push_back(new NVMatrix(1, batchSize));
GPURawLabelTrain.back()->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTrain.back(), "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", trainBatchNum*batchSize);
}
// test set
batchSize = opt1.batchSize;
for (int batch = 0; batch < testBatchNum; batch++) {
CPUTest[batch] = new Matrix(imPixels, batchSize);
CPUTest[batch]->setTrans(false);
GPUTest[batch] = new NVMatrix();
hmReadFromFile(*CPUTest[batch], "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", opt1.numTrain+batch*batchSize);
GPURawLabelTest[batch] = new NVMatrix(1, batchSize);
GPURawLabelTest[batch]->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest[batch], "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", opt1.numTrain+batch*batchSize);
}
batchSize = opt1.numTest % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTest.push_back(new Matrix(imPixels, batchSize));
CPUTest.back()->setTrans(false);
GPUTest.push_back(new NVMatrix());
hmReadFromFile(*CPUTest.back(), "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", opt1.numTrain+testBatchNum*batchSize);
GPURawLabelTest.push_back(new NVMatrix(1, batchSize));
GPURawLabelTest.back()->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest.back(), "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", opt1.numTrain+testBatchNum*batchSize);
}
cropDataProvider(CPUTrain, GPUTrain, opt1, false, false);
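// clear any previous dump, then write every cropped training batch to test_out.bin so the crops can be inspected offline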
remove("/scratch0/qwang37/cifar-10-batches-bin/test_out.bin");
for (int batch = 0; batch < GPUTrain.size(); batch++) {
NVSaveToFile(*GPUTrain[batch], "/scratch0/qwang37/cifar-10-batches-bin/test_out.bin", true);
}
printf("cropDataProvider() test done!\n");
}
void testNVReadFromFileUint8() {
//assignOpt();
printf("starting test testNVReadFromFileUint8()!\n");
// read data to host memory (and labels to the GPU memory)
int imPixels = 32*32*opt1.numChannels;
int batchSize = opt1.batchSize;
int trainBatchNum = opt1.numTrain / batchSize;
int testBatchNum = opt1.numTest / batchSize;
vector<Matrix*> CPUTrain(trainBatchNum), CPUTest(testBatchNum);
vector<NVMatrix*> GPUTrain(trainBatchNum), GPUTest(testBatchNum);
vector<NVMatrix*> GPURawLabelTrain(trainBatchNum), GPURawLabelTest(testBatchNum);
for (int batch = 0; batch < trainBatchNum; batch++) {
CPUTrain[batch] = new Matrix(imPixels, batchSize);
CPUTrain[batch]->setTrans(false);
GPUTrain[batch] = new NVMatrix();
hmReadFromFile(*CPUTrain[batch], "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", batch*batchSize);
GPURawLabelTrain[batch] = new NVMatrix(1, batchSize);
GPURawLabelTrain[batch]->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTrain[batch], "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", batch*batchSize);
}
batchSize = opt1.numTrain % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTrain.push_back(new Matrix(imPixels, batchSize));
CPUTrain.back()->setTrans(false);
GPUTrain.push_back(new NVMatrix());
hmReadFromFile(*CPUTrain.back(), "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", trainBatchNum*batchSize);
GPURawLabelTrain.push_back(new NVMatrix(1, batchSize));
GPURawLabelTrain.back()->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTrain.back(), "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", trainBatchNum*batchSize);
}
// test set
batchSize = opt1.batchSize;
for (int batch = 0; batch < testBatchNum; batch++) {
CPUTest[batch] = new Matrix(imPixels, batchSize);
CPUTest[batch]->setTrans(false);
GPUTest[batch] = new NVMatrix();
hmReadFromFile(*CPUTest[batch], "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", opt1.numTrain+batch*batchSize);
GPURawLabelTest[batch] = new NVMatrix(1, batchSize);
GPURawLabelTest[batch]->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest[batch], "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", opt1.numTrain+batch*batchSize);
}
batchSize = opt1.numTest % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTest.push_back(new Matrix(imPixels, batchSize));
CPUTest.back()->setTrans(false);
GPUTest.push_back(new NVMatrix());
hmReadFromFile(*CPUTest.back(), "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", opt1.numTrain+testBatchNum*batchSize);
GPURawLabelTest.push_back(new NVMatrix(1, batchSize));
GPURawLabelTest.back()->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest.back(), "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", opt1.numTrain+testBatchNum*batchSize);
}
cropDataProvider(CPUTrain, GPUTrain, opt1, false, false);
remove("/scratch0/qwang37/cifar-10-batches-bin/test_out.bin");
for (int batch = 0; batch < GPUTrain.size(); batch++) {
NVSaveToFile(*GPUTrain[batch], "/scratch0/qwang37/cifar-10-batches-bin/test_out.bin", true);
}
printf("testNVReadFromFileUint8() test done!\n");
}
void centerData() {
ifstream in;
printf("starting centering data\n");
MTYPE* data = (MTYPE*) malloc(60000*3072*sizeof(MTYPE));
MTYPE* mean = (MTYPE*) malloc(3072*sizeof(MTYPE));
char* labels = (char*) malloc(60000*sizeof(char));
char dir_name[] = "/scratch0/qwang37/cifar-10-batches-bin/";
char file_name[] = "data_batch_1.bin";
char full_name[100];
int record_start;
for (int j = 0; j < 3072; j++)
mean[j] = 0.0;
for (int k = 1; k <= 5; k++) {
file_name[11] = '0' + k;
strcpy(full_name, dir_name);
strcat(full_name, file_name);
in.open(full_name, std::ifstream::in | std::ifstream::binary);
if (in.fail()) {
printf("open data file %d failed!\n", k);
exit(-1);
}
printf("reading batch %d\n", k);
for (int i = 0; i < 10000; i++) {
record_start = (k-1)*10000 + i;
labels[record_start] = in.get();
for (int j = 0; j < 3072; j++) {
data[record_start*3072+j] = MTYPE(in.get());
mean[j] += data[record_start*3072+j];
}
}
in.close();
}
char test_name[100];
strcpy(test_name, dir_name);
strcat(test_name, "test_batch.bin");
in.open(test_name, std::ifstream::in | std::ifstream::binary);
printf("reading test batch\n");
for (int i = 0; i < 10000; i++) {
record_start = 5*10000 + i;
labels[record_start] = in.get();
for (int j = 0; j < 3072; j++) {
data[record_start*3072+j] = (MTYPE)in.get();
//mean[j] += data[record_start*3072+j];
}
}
in.close();
/*
for (int j = 0; j < 3072; j++)
mean[j] /= 50000.0;
*/
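// the per-pixel mean is loaded from a precomputed data_mean.bin (3072 4-byte values) rather than recomputed from the training batches above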
ifstream in_mean;
in_mean.open("/scratch0/qwang37/cifar-10-batches-bin/data_mean.bin", std::ifstream::in | std::ifstream::binary);
if (in_mean.fail()) {
cout << "open file failed!\n";
return;
}
for (int j = 0; j < 3072; j++)
in_mean.read((char*)(mean+j), 4);
in_mean.close();
for (int i = 0; i < 60000; i++)
for (int j = 0; j < 3072; j++)
data[i*3072+j] -= mean[j];
//data[i*3072+j] -= 127.5;
ofstream out("/scratch0/qwang37/cifar-10-batches-bin/cifar_centered.bin", std::ofstream::out | std::ifstream::binary);
out.write((char*)data, 60000*3072*sizeof(MTYPE));
out.close();
}
void convertToMTYPE() {
ifstream in;
printf("starting copying data\n");
MTYPE* data = (MTYPE*) malloc(60000*3072*sizeof(MTYPE));
char* labels = (char*) malloc(60000*sizeof(char));
char dir_name[] = "/scratch0/qwang37/cifar-10-batches-bin/";
char file_name[] = "data_batch_1.bin";
char full_name[100];
int record_start;
printf("starting copy data\n");
for (int k = 1; k <= 5; k++) {
file_name[11] = '0' + k;
strcpy(full_name, dir_name);
strcat(full_name, file_name);
in.open(full_name, std::ifstream::in | std::ifstream::binary);
if (in.fail()) {
printf("open data file %d failed!\n", k);
exit(-1);
}
printf("reading batch %d\n", k);
for (int i = 0; i < 10000; i++) {
record_start = (k-1)*10000 + i;
labels[record_start] = in.get();
for (int j = 0; j < 3072; j++) {
data[record_start*3072+j] = MTYPE(in.get());
}
}
in.close();
}
char test_name[100];
strcpy(test_name, dir_name);
strcat(test_name, "test_batch.bin");
in.open(test_name, std::ifstream::in | std::ifstream::binary);
printf("reading test batch\n");
for (int i = 0; i < 10000; i++) {
record_start = 5*10000 + i;
labels[record_start] = in.get();
for (int j = 0; j < 3072; j++) {
data[record_start*3072+j] = (MTYPE)in.get();
}
}
in.close();
ofstream out("/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", std::ofstream::out | std::ifstream::binary);
out.write((char*)data, 60000*3072*sizeof(MTYPE));
out.close();
}
void testAssembleMatrix() {
printf("start testAssembleMatrix()\n");
vector<NVMatrix> matrices(4);
Matrix tmp(2,3);
MTYPE* data = tmp.getData();
for (int i = 0; i < 4; i++)
matrices[i].resize(2,3);
// test1
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 6; j++)
data[j] = i * 6 + j;
tmp.setTrans(false);
matrices[i].copyFromHost(tmp, true);
}
NVMatrix rowFalse;
assembleNVMatrix(matrices, rowFalse, 0);
rowFalse.printShape("rowFalse");
rowFalse.print(8,3);
for (int i = 0; i < 4; i++) {
matrices[i].resize(0,0);
}
splitNVMatrix(matrices, rowFalse, 0);
char a[10];
for (int i = 0; i < 4; i++) {
sprintf(a, "rowFalse%d", i);
matrices[i].printShape(a);
matrices[i].print(2,3);
}
// test2
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 6; j++)
data[j] = i * 6 + j;
tmp.setTrans(true);
matrices[i].copyFromHost(tmp, true);
}
NVMatrix rowTrue;
assembleNVMatrix(matrices, rowTrue, 0);
rowTrue.printShape("rowTrue");
rowTrue.print(8,3);
for (int i = 0; i < 4; i++) {
matrices[i].resize(0,0);
}
splitNVMatrix(matrices, rowTrue, 0);
for (int i = 0; i < 4; i++) {
sprintf(a, "rowTrue%d", i);
matrices[i].printShape(a);
matrices[i].print(2,3);
}
// test3
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 6; j++)
data[j] = i * 6 + j;
tmp.setTrans(false);
matrices[i].copyFromHost(tmp, true);
}
NVMatrix colFalse;
assembleNVMatrix(matrices, colFalse, 1);
colFalse.printShape("colFalse");
colFalse.print(2,12);
for (int i = 0; i < 4; i++) {
matrices[i].resize(0,0);
}
splitNVMatrix(matrices, colFalse, 1);
for (int i = 0; i < 4; i++) {
sprintf(a, "colFalse%d", i);
matrices[i].printShape(a);
matrices[i].print(2,3);
}
// test4
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 6; j++)
data[j] = i * 6 + j;
tmp.setTrans(true);
matrices[i].copyFromHost(tmp, true);
}
NVMatrix colTrue;
assembleNVMatrix(matrices, colTrue, 1);
colTrue.printShape("colTrue");
colTrue.print(2,12);
for (int i = 0; i < 4; i++) {
matrices[i].resize(0,0);
}
splitNVMatrix(matrices, colTrue, 1);
for (int i = 0; i < 4; i++) {
sprintf(a, "colTrue%d", i);
matrices[i].printShape(a);
matrices[i].print(2,3);
}
printf("finished testAssembleMatrix()\n");
}
void testAssembleMatrix1() {
printf("start testAssembleMatrix1()\n");
vector<NVMatrix> matrices(4);
NVMatrix mat(32*32*3, 60000);
mat.setTrans(false);
NVReadFromFile(mat, "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin");
splitNVMatrix(matrices, mat, 0);
for (int i = 0; i < 4; i++) {
char a[100];
sprintf(a, "/scratch0/qwang37/cifar-10-batches-bin/testAssemble/r%d.bin", i);
NVSaveToFile(matrices[i], a);
}
NVMatrix ass;
assembleNVMatrix(matrices, ass, 0);
NVSaveToFile(ass, "/scratch0/qwang37/cifar-10-batches-bin/testAssemble.bin");
printf("completed testAssembleMatrix1()!\n");
}
void testAssembleMatrix2() {
printf("start testAssembleMatrix2()\n");
vector<NVMatrix> sub(2);
NVMatrix mat(32*32*3, 60000);
mat.setTrans(false);
NVReadFromFile(mat, "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin");
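// split the 3072 rows into blocks of 1000 and 2072 (dim 0), and later the 60000 columns into 20000 and 40000 (dim 1); save the pieces and reassemble each pair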
splitNVMatrix(sub[0], sub[1], mat, 1000, 2072, 0);
for (int i = 0; i < 2; i++) {
char a[100];
sprintf(a, "/scratch0/qwang37/cifar-10-batches-bin/testAssemble/r%d.bin", i);
NVSaveToFile(sub[i], a);
}
NVMatrix ass;
assembleNVMatrix(sub[0], sub[1], ass, 0);
printf("finished assembleNVMatrix!\n");
NVSaveToFile(ass, "/scratch0/qwang37/cifar-10-batches-bin/rowAssemble.bin");
splitNVMatrix(sub[0], sub[1], mat, 20000, 40000, 1);
for (int i = 0; i < 2; i++) {
char a[100];
sprintf(a, "/scratch0/qwang37/cifar-10-batches-bin/testAssemble/c%d.bin", i);
NVSaveToFile(sub[i], a);
}
NVMatrix ass1;
assembleNVMatrix(sub[0], sub[1], ass1, 1);
NVSaveToFile(ass1, "/scratch0/qwang37/cifar-10-batches-bin/colAssemble.bin");
printf("completed testAssembleMatrix2()!\n");
}
void testGenFilterMask() {
hiprandState_t* devStates = init_cuda_rand(100);
printf("start testGenFilterMask()!\n");
NVMatrix a;
genFilterMask(a, 10, 10, 0.2, devStates);
a.printShape("a");
a.print(10,10);
}
void testAbs() {
NVMatrix a;
a.resize(16*10,10);
a.setTrans(false);
a.randomizeGaussian();
NVMatrix b;
NVMask maxMask;
convCompeteAbs(a, b, maxMask, 10, 2, 2, 4, 10);
b.print(16*10, 10);
}
| 126b592ffcea9da0e8f2c4ea84df915cab6aa99e.cu | #include <iostream>
#include <fstream>
#include <string.h>
#include <stdio.h>
#include <vector>
#include <time.h>
#include <nvmatrix.cuh>
#include <cudaconv2.cuh>
#include "routines.cuh"
#include <layer_kernels.cuh>
using namespace std;
extern LayerOpt opt1, opt2, opt3, opt4, optTop;
extern FILE* pFile;
//void assignOpt();
// test blocks
void testSamplePatches() {
printf("testing samplePatches()...\n");
int patchSize = 16;
int numPatches = 10000;
int dims[3] = {32, 32, 3};
int numRecords = 1000;
char* in_name = "/scratch0/qwang37/cifar-10-batches-bin/cifar_normalized.bin";
char* out_name1 = "/scratch0/qwang37/cifar-10-batches-bin/cifar_samplePatch_test_half.bin";
samplePatches(patchSize, numPatches, dims, numRecords, in_name, out_name1);
patchSize = 32;
char* out_name2 = "/scratch0/qwang37/cifar-10-batches-bin/cifar_samplePatch_test_full.bin";
samplePatches(patchSize, numPatches, dims, numRecords, in_name, out_name2);
printf("samplePatches() test done!\n");
}
void testMult() {
printf("rightMult test\n");
MTYPE data1[6] = {0,1,2,3,4,5};
Matrix m1(data1, 2, 3, false);
NVMatrix nvm1(m1, true);
MTYPE data2[9] = {0,1,2,3,4,5,6,7,8};
Matrix m2(data2, 3, 3, false);
NVMatrix nvm2(m2, true);
NVMatrix nvm3;
nvm1.rightMult(nvm2,1);
nvm1.printShape("nvm1");
printf("nvm1 _isTrans = %d\n", nvm1.isTrans());
nvm1.print(nvm1.getNumRows(), nvm1.getNumCols());
}
void testConv() {
printf("convolution test\n");
MTYPE* data = (MTYPE*) malloc(8*3*3*2*sizeof(MTYPE));
for (int i = 1; i <= 8*3*3*2; i++) {
data[i-1] = i; // fill both test images with the running index 1..8*3*3*2
}
Matrix *im = new Matrix(data, 8*3*3, 2, true); // the transpose flag has no effect here
NVMatrix *nvim = new NVMatrix(*im, true);
printf("filters\n");
MTYPE* data2 = (MTYPE*) malloc(32*4*2*sizeof(MTYPE));
for (int i = 1; i <= 32*4*2; i++) {
data2[i-1] = i; // fill the filters with the running index as well
}
Matrix *f = new Matrix(data2, 32*4, 2, true);
NVMatrix *nvf = new NVMatrix(*f, true);
NVMatrix *targets = new NVMatrix();
convWeightActs(*nvim, *nvf, *targets, 3, 2, 2, 2, 0, 1, 8, 2, 0);
printf("numRows: %d, numCols: %d\n", targets->getNumRows(), targets->getNumCols());
targets->print(targets->getNumRows(), targets->getNumCols());
}
void testMatrixIO() {
printf("testing Matrix IO...\n");
MTYPE data[8] = {0, 1, 2, 3, 4, 5, 6, 7};
Matrix rm(data, 2, 4, true);
Matrix cm(data, 4, 2, false);
NVMatrix NVrm(rm, true);
NVMatrix NVcm(cm, true);
NVSaveToFile(NVrm, "NVrm.bin");
NVSaveToFile(NVcm, "NVcm.bin");
/* attention: the Matrix and NVMatrix classes do not
have proper direct copying assignment operators!
assignment has to be done through reference
*/
NVMatrix NVrm1(2,4), NVcm1(4,2);
NVrm1.setTrans(true); NVcm1.setTrans(false);
NVReadFromFile(NVrm1, "NVrm.bin");
NVReadFromFile(NVcm1, "NVcm.bin");
NVrm1.printShape("NVrm1");
NVrm1.print(NVrm1.getNumRows(), NVrm1.getNumCols());
NVcm1.printShape("NVcm1");
NVcm1.print(NVcm1.getNumRows(), NVcm1.getNumCols());
printf("Matrix IO test complete!\n");
}
void testDataIO() {
printf("testing Data IO...\n");
MTYPE data[8] = {0, 1, 2, 3, 4, 5, 6, 7};
Matrix rm(data, 2, 4, true);
Matrix cm(data, 4, 2, false);
NVMatrix NVrm(rm, true);
NVMatrix NVcm(cm, true);
NVSaveToFile(NVrm, "NVm.bin", true);
NVSaveToFile(NVcm, "NVm.bin", true);
/* attention: the Matrix and NVMatrix classes do not
have proper direct copying assignment operators!
assignment has to be done through reference
*/
NVMatrix NVrm1(2,4), NVcm1(4,2);
NVrm1.setTrans(true); NVcm1.setTrans(false);
NVReadFromFile(NVrm1, "NVm.bin");
NVReadFromFile(NVcm1, "NVm.bin", 2);
NVrm1.printShape("NVrm1");
NVrm1.print(NVrm1.getNumRows(), NVrm1.getNumCols());
NVcm1.printShape("NVcm1");
NVcm1.print(NVcm1.getNumRows(), NVcm1.getNumCols());
printf("Data IO test complete!\n");
}
/*
void testTrainFCAE() {
printf("testing trainFCAE()...\n");
LayerOpt opt1;
char* layerName = "layer1";
char* df = "/scratch0/qwang37/cifar-10-batches-bin/cifar_patches.bin";
opt1.layerName = layerName;
opt1.dataFile = df;
opt1.patchSize = 5;
opt1.numChannels = 3;
opt1.numFilters = 64;
opt1.batchSize = 2000;
opt1.batchNum = 1000;
opt1.numEpochs = 100;
opt1.initstv = 0.01;
opt1.mom = 0.0;
opt1.lrW = 0.01;
opt1.lrB = 0.01;
opt1.weightDecay = 0.003;
opt1.sparseParam = 0.035;
opt1.sparseWeight = 0.0;
NVMatrix weight1; NVMatrix bias1; // parameters for the first layer
char* weightFile ="/scratch0/qwang37/cifar-10-batches-bin/filters5x5_layer1.bin";
char* biasFile = "/scratch0/qwang37/cifar-10-batches-bin/biases5x5_layer1.bin";
trainFCAE(opt1, weight1, bias1, weightFile, biasFile);
//weight1.printShape("forward weight");
//weight1.print(weight1.getNumRows(), weight1.getNumCols());
printf("trainFCAE() test complete!\n");
}
*/
/*
void testGenerateDataConv(char* poolType) {
printf("testing testGenerateDataConv(%s)...\n", poolType);
Dim dims;
dims.dataX = 32; dims.dataY = 32; dims.dataC = 3; dims.batchSize = 5000; dims.numBatches = 10;
dims.filterX = 5; dims.numFilters = 64; dims.stride = 1; dims.padding = 0;
dims.poolSize = 3; dims.poolStride = 2; dims.poolStartX = 0; strcpy(dims.pooler, poolType);
dims.poolOutX = (dims.dataX - dims.filterX + 1 - dims.poolSize) / dims.poolStride + 1;
char* sourceFile = "/scratch0/qwang37/cifar-10-batches-bin/cifar_normalized.bin";
char* destFile = (char*) malloc (100);
strcpy(destFile, "/scratch0/qwang37/cifar-10-batches-bin/cifar_layer2data_conv_");
strcat(destFile, poolType);
strcat(destFile, ".bin");
remove(destFile);
char* layerType = "conv";
NVMatrix weight(dims.filterX*dims.filterX*dims.dataC, dims.numFilters), biases(1, dims.numFilters);
NVReadFromFile(weight, "/scratch0/qwang37/cifar-10-batches-bin/filters5x5_layer1.bin");
NVReadFromFile(biases, "/scratch0/qwang37/cifar-10-batches-bin/biases5x5_layer1.bin");
generateData(sourceFile, destFile, layerType, weight, biases, dims);
printf("testGenerateDataConv() test complete!\n");
}
*/
/*
void testGenerateDataFC() {
printf("testing testGenerateDataFC()...\n");
Dim dims;
dims.dataX = 5; dims.dataY = 5; dims.dataC = 3; dims.batchSize = 100000; dims.numBatches = 5;
dims.filterX = 5; dims.numFilters = 64; dims.stride = 1; dims.padding = 0;
char* sourceFile = "/scratch0/qwang37/cifar-10-batches-bin/cifar_patches.bin";
char* destFile = "/scratch0/qwang37/cifar-10-batches-bin/cifar_layer2data_FC.bin";
char* layerType = "FC";
NVMatrix weight(dims.filterX*dims.filterX*dims.dataC, dims.numFilters), biases(1, dims.numFilters);
NVReadFromFile(weight, "/scratch0/qwang37/cifar-10-batches-bin/filters5x5_layer1.bin");
NVReadFromFile(biases, "/scratch0/qwang37/cifar-10-batches-bin/biases5x5_layer1.bin");
remove(destFile);
generateData(sourceFile, destFile, layerType, weight, biases, dims);
printf("testGenerateDataFC() test complete!\n");
}
*/
void testNVLabelReadFromFile() {
printf("testing NVLabelReadFromFile()...\n");
NVMatrix labels(10,10);
labels.setTrans(false);
NVLabelReadFromFile(labels, "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", 101);
labels.printShape("labels");
labels.print(10,10);
printf("NVLabelReadFromFile() test complete!\n");
}
void testNVRawLabelReadFromFile() {
printf("testing NVRawLabelReadFromFile()...\n");
NVMatrix labels(1,10);
NVRawLabelReadFromFile(labels, "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", 101);
labels.printShape("labels");
labels.print(1,10);
printf("NVRawLabelReadFromFile() test complete!\n");
}
void finetune_rnorm() {
////assignOpt();
printf("starting finetune_rnorm()!\n");
fprintf(pFile, "starting finetune_rnorm!\n");
// initialize cublas
cudaSetDevice(cutGetMaxGflopsDeviceId());
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
cublasInit();
// data and parameters storage
NVMatrix act1, act1Pool, act1PoolNorm, act1Denom;
NVMatrix act2, act2Norm, act2NormPool, act2Denom;
NVMatrix act3;
NVMatrix act4;
NVMatrix actTop;
NVMatrix act1Grad, act1PoolGrad, act1PoolNormGrad;
NVMatrix act2Grad, act2NormGrad, act2NormPoolGrad;
NVMatrix act3Grad;
NVMatrix act4Grad;
NVMatrix actTopGrad;
NVMatrix weight1, weight2, weight3, weight4, weightTop;
NVMatrix weight1Grad, weight2Grad, weight3Grad, weight4Grad, weightTopGrad;
NVMatrix weight1Inc, weight2Inc, weight3Inc, weight4Inc, weightTopInc;
NVMatrix weight1GradTmp, weight2GradTmp, weight3GradTmp, weight4GradTmp, weightTopGradTmp;
NVMatrix bias1, bias2, bias3, bias4, biasTop; // bias4 is just an all-zero dummy vector
NVMatrix bias1Grad, bias2Grad, bias3Grad, bias4Grad, biasTopGrad;
NVMatrix bias1Inc, bias2Inc, bias3Inc, bias4Inc, biasTopInc;
// initialize parameters
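// either resume all layer parameters from the checkpoint files on disk (loadParam) or start from small random weights and zero biases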
if (opt1.loadParam) {
weight1.resize(opt1.numVis, opt1.numFilters);
weight2.resize(opt2.numVis, opt2.numFilters);
weight3.resize(opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters);
weight4.resize(opt4.numVis * opt4.outX * opt4.outX, opt4.numFilters);
weightTop.resize(optTop.numVis, optTop.numFilters);
bias1.resize(opt1.numFilters, 1);
bias2.resize(opt2.numFilters, 1);
bias3.resize(opt3.numFilters * opt3.outX * opt3.outX, 1);
bias4.resize(opt4.numFilters * opt4.outX * opt4.outX, 1);
biasTop.resize(1, optTop.numFilters);
biasTop.setTrans(true);
NVReadFromFile(weight1, "/scratch0/qwang37/cifar-10-batches-bin/weight1.bin");
NVReadFromFile(weight2, "/scratch0/qwang37/cifar-10-batches-bin/weight2.bin");
NVReadFromFile(weight3, "/scratch0/qwang37/cifar-10-batches-bin/weight3.bin");
NVReadFromFile(weight4, "/scratch0/qwang37/cifar-10-batches-bin/weight4.bin");
NVReadFromFile(weightTop, "/scratch0/qwang37/cifar-10-batches-bin/weightTop.bin");
NVReadFromFile(bias1, "/scratch0/qwang37/cifar-10-batches-bin/bias1.bin");
NVReadFromFile(bias2, "/scratch0/qwang37/cifar-10-batches-bin/bias2.bin");
NVReadFromFile(bias3, "/scratch0/qwang37/cifar-10-batches-bin/bias3.bin");
NVReadFromFile(bias4, "/scratch0/qwang37/cifar-10-batches-bin/bias4.bin");
NVReadFromFile(biasTop, "/scratch0/qwang37/cifar-10-batches-bin/biasTop.bin");
}
else {
initWeights(weight1, opt1.numVis, opt1.numFilters, false, opt1.initstv);
initWeights(weight2, opt2.numVis, opt2.numFilters, false, opt2.initstv);
initWeights(weight3, opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters, false, opt3.initstv);
initWeights(weight4, opt4.numVis * opt4.outX * opt4.outX, opt4.numFilters, false, opt4.initstv);
initWeights(weightTop, optTop.numVis, optTop.numFilters, false, optTop.initstv);
initWeights(bias1, opt1.numFilters, 1, false, 0.0);
initWeights(bias2, opt2.numFilters, 1, false, 0.0);
initWeights(bias3, opt3.numFilters * opt3.outX * opt3.outX, 1, false, 0.0);
initWeights(bias4, opt4.numFilters * opt4.outX * opt4.outX, 1, false, 0.0);
initWeights(biasTop, 1, optTop.numFilters, true, 0.0);
}
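// gradient accumulators and momentum (Inc) buffers for every layer start from zero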
initWeights(weight1Inc, opt1.numVis, opt1.numFilters, false, 0.0); initWeights(weight1Grad, opt1.numVis, opt1.numFilters, false, 0.0);
initWeights(weight2Inc, opt2.numVis, opt2.numFilters, false, 0.0); initWeights(weight2Grad, opt2.numVis, opt2.numFilters, false, 0.0);
initWeights(weight3Inc, opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters, false, 0.0); initWeights(weight3Grad, opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters, false, 0.0); // not useful for 3 and 4
initWeights(weight4Inc, opt4.numVis * opt4.outX * opt4.outX, opt4.numFilters, false, 0.0); initWeights(weight4Grad, opt4.numVis * opt4.outX * opt4.outX, opt4.numFilters, false, 0.0);
initWeights(weightTopInc, optTop.numVis, optTop.numFilters, false, 0.0); initWeights(weightTopGrad, optTop.numVis, optTop.numFilters, false, 0.0);
initWeights(bias1Inc, opt1.numFilters, 1, false, 0.0); initWeights(bias1Grad, opt1.numFilters, 1, false, 0.0);
initWeights(bias2Inc, opt2.numFilters, 1, false, 0.0); initWeights(bias2Grad, opt2.numFilters, 1, false, 0.0);
initWeights(bias3Inc, opt3.numFilters * opt3.outX * opt3.outX, 1, false, 0.0); initWeights(bias3Grad, opt3.numFilters * opt3.outX * opt3.outX, 1, false, 0.0); // not useful for 3
initWeights(bias4Inc, opt4.numFilters * opt4.outX * opt4.outX, 1, false, 0.0); initWeights(bias4Grad, opt4.numFilters * opt4.outX * opt4.outX, 1, false, 0.0);
initWeights(biasTopInc, 1, opt1.labelSize, true, 0.0); initWeights(biasTopGrad, 1, opt1.labelSize, true, 0.0);
// read data to host memory (and labels to the GPU memory)
int imPixels = 32*32*opt1.numChannels;
int batchSize = opt1.batchSize;
int trainBatchNum = opt1.numTrain / batchSize;
int testBatchNum = opt1.numTest / batchSize;
vector<Matrix*> CPUTrain(trainBatchNum), CPUTest(testBatchNum);
vector<NVMatrix*> GPUTrain(trainBatchNum), GPUTest(testBatchNum);
vector<NVMatrix*> GPURawLabelTrain(trainBatchNum), GPURawLabelTest(testBatchNum);
for (int batch = 0; batch < trainBatchNum; batch++) {
CPUTrain[batch] = new Matrix(imPixels, batchSize);
CPUTrain[batch]->setTrans(false);
GPUTrain[batch] = new NVMatrix();
hmReadFromFile(*CPUTrain[batch], opt1.dataPath + "/cifar_raw.bin", batch*batchSize);
GPURawLabelTrain[batch] = new NVMatrix(1, batchSize);
GPURawLabelTrain[batch]->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTrain[batch], opt1.dataPath + "/cifar_labels.bin", batch*batchSize);
}
batchSize = opt1.numTrain % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTrain.push_back(new Matrix(imPixels, batchSize));
CPUTrain.back()->setTrans(false);
GPUTrain.push_back(new NVMatrix());
hmReadFromFile(*CPUTrain.back(), opt1.dataPath + "/cifar_raw.bin", trainBatchNum*batchSize);
GPURawLabelTrain.push_back(new NVMatrix(1, batchSize));
GPURawLabelTrain.back()->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTrain.back(), opt1.dataPath + "/cifar_labels.bin", trainBatchNum*batchSize);
}
// test set
batchSize = opt1.batchSize;
for (int batch = 0; batch < testBatchNum; batch++) {
CPUTest[batch] = new Matrix(imPixels, batchSize);
CPUTest[batch]->setTrans(false);
GPUTest[batch] = new NVMatrix();
hmReadFromFile(*CPUTest[batch], opt1.dataPath + "/cifar_raw.bin", opt1.numTrain+batch*batchSize);
GPURawLabelTest[batch] = new NVMatrix(1, batchSize);
GPURawLabelTest[batch]->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest[batch], opt1.dataPath + "/cifar_labels.bin", opt1.numTrain+batch*batchSize);
}
batchSize = opt1.numTest % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTest.push_back(new Matrix(imPixels, batchSize));
CPUTest.back()->setTrans(false);
GPUTest.push_back(new NVMatrix());
hmReadFromFile(*CPUTest.back(), opt1.dataPath + "/cifar_raw.bin", opt1.numTrain+testBatchNum*batchSize);
GPURawLabelTest.push_back(new NVMatrix(1, batchSize));
GPURawLabelTest.back()->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest.back(), opt1.dataPath + "/cifar_labels.bin", opt1.numTrain+testBatchNum*batchSize);
}
NVMatrix trueLabelLogProbs;
NVMatrix correctProbs;
MTYPE cost; // as before, we trace the performance using the cost variable
MTYPE cost1;
NVMatrix absM;
MTYPE weightAbs1, weightAbs2, weightAbs3, weightAbs4, weightAbsTop;
MTYPE biasAbs1, biasAbs2, biasAbs3, biasAbs4, biasAbsTop;
MTYPE weightGradAbs1, weightGradAbs2, weightGradAbs3, weightGradAbs4, weightGradAbsTop;
MTYPE biasGradAbs1, biasGradAbs2, biasGradAbs3, biasGradAbs4, biasGradAbsTop;
clock_t startClock;
clock_t tick;
float lr_scale = 1.0, mom_scale = 1.0;
cropDataProvider(CPUTest, GPUTest, opt1, true, opt1.whitened); // test data is fixed
for (int epoch = 0; epoch < opt1.numEpochs; epoch++) {
cost = 0;
cost1 = 0;
cropDataProvider(CPUTrain, GPUTrain, opt1, false, opt1.whitened); // copy data to the GPU side
cudaThreadSynchronize();
startClock = clock();
for (int batch = 0; batch < GPUTrain.size(); batch++) {
batchSize = GPUTrain[batch]->getNumCols();
// ====forward pass====
// 0->1
//cout << "0->1\n";
//original
activateConv(*GPUTrain[batch], act1, weight1, bias1, opt1);
act1.apply(ReluOperator());
act1Pool.transpose(false);
convLocalPool(act1, act1Pool, opt1.numFilters, opt1.poolSize, opt1.poolStartX, opt1.poolStride, opt1.poolOutX, MaxPooler());
convResponseNormCrossMap(act1Pool, act1Denom, act1PoolNorm, opt1.numFilters, opt1.sizeF, opt1.addScale/opt1.sizeF, opt1.powScale, false);
// 1->2
//cout << "1->2\n";
//original
activateConv(act1PoolNorm, act2, weight2, bias2, opt2);
act2.apply(ReluOperator());
convResponseNormCrossMap(act2, act2Denom, act2Norm, opt2.numFilters, opt2.sizeF, opt2.addScale/opt2.sizeF, opt2.powScale, false);
act2NormPool.transpose(false);
convLocalPool(act2Norm, act2NormPool, opt2.numFilters, opt2.poolSize, opt2.poolStartX, opt2.poolStride, opt2.poolOutX, MaxPooler());
// 2->3
//cout << "2->3\n";
// original
activateLocal(act2NormPool, act3, weight3, bias3, opt3);
act3.apply(ReluOperator());
// 3->4
//cout << "3->4\n";
// original
activateLocal(act3, act4, weight4, bias4, opt4);
act4.apply(ReluOperator());
// 4->top
//cout << "4->top\n";
actTop.transpose(true);
actTop.resize(batchSize, opt1.labelSize);
activate(act4, actTop, weightTop, biasTop, 0, 1);
//softmax layer
NVMatrix& max = actTop.max(1);
actTop.addVector(max, -1);
actTop.apply(NVMatrixOps::Exp());
NVMatrix& sum = actTop.sum(1);
actTop.eltwiseDivideByVector(sum);
delete &max;
delete &sum;
// compute cost
computeLogregSoftmaxGrad(*GPURawLabelTrain[batch], actTop, actTopGrad, false, 1);
actTop.transpose(false);
computeLogregCost(*GPURawLabelTrain[batch], actTop, trueLabelLogProbs, correctProbs); //labelLogProbs:(1, numCases); correctProbs:(1, numCases)
cost += correctProbs.sum() / batchSize;
cost1 += trueLabelLogProbs.sum() / batchSize;
// ====== back pass ======
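// gradients flow top -> 4 -> 3 -> 2 -> 1: each layer forms its weight/bias gradients from its input activations and the incoming delta, then backpropagates the delta through its own weights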
// top -> 4, 3, 2, 1
//cout << "top -> 4, 3, 2, 1";
// weight update
NVMatrix& act4T = act4.getTranspose();
weightTopGrad.addProduct(act4T, actTopGrad, 0, 1);
biasTopGrad.addSum(actTopGrad, 0, 0, 1);
delete &act4T;
// bp
actTopGrad.transpose(true);
NVMatrix& weightTopT = weightTop.getTranspose();
act4Grad.addProduct(actTopGrad, weightTopT, 0, 1);
delete &weightTopT;
// 4->3
//cout << "4->3\n";
act4Grad.transpose(false); // convert back to row-major
act4.transpose(false);
act4Grad.applyBinary(ReluGradientOperator(), act4);
localWeightActs(act3, act4Grad, weight4Grad, opt4.imSize, opt4.outX, opt4.outX, opt4.patchSize, opt4.paddingStart, 1, opt4.numChannels, 1);
bias4Grad.addSum(act4Grad, 1, 0, 1);
localImgActs(act4Grad, weight4, act3Grad, opt4.imSize, opt4.imSize, opt4.outX, opt4.paddingStart, 1, opt4.numChannels, 1);
// 3->2
//cout << "3->2\n";
// original part
act3Grad.transpose(false); // convert back to row-major
act3.transpose(false);
act3Grad.applyBinary(ReluGradientOperator(), act3);
localWeightActs(act2NormPool, act3Grad, weight3Grad, opt3.imSize, opt3.outX, opt3.outX, opt3.patchSize, opt3.paddingStart, 1, opt3.numChannels, 1);
bias3Grad.addSum(act3Grad, 1, 0, 1);
localImgActs(act3Grad, weight3, act2NormPoolGrad, opt3.imSize, opt3.imSize, opt3.outX, opt3.paddingStart, 1, opt3.numChannels, 1);
// 2->1
//cout << "2->1\n";
// original part
act2NormPoolGrad.transpose(false);
act2NormPool.transpose(false);
convLocalMaxUndo(act2Norm, act2NormPoolGrad, act2NormPool, act2NormGrad, opt2.poolSize, opt2.poolStartX, opt2.poolStride, opt2.poolOutX);
convResponseNormCrossMapUndo(act2NormGrad, act2Denom, act2, act2Norm, act2Grad, opt2.numFilters, opt2.sizeF, opt2.addScale/opt2.sizeF, opt2.powScale, false, 0, 1);
act2Grad.applyBinary(ReluGradientOperator(), act2);
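// convWeightActs emits one partial gradient per group of opt2.partialSum output locations; the reshape + addSum below collapses the groups into the full weight gradient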
convWeightActs(act1PoolNorm, act2Grad, weight2GradTmp, opt2.imSize, opt2.outX, opt2.outX, opt2.patchSize, opt2.paddingStart, 1, opt2.numChannels, 1, opt2.partialSum);
weight2GradTmp.reshape(opt2.outX * opt2.outX / opt2.partialSum, opt2.numChannels * opt2.patchSize * opt2.patchSize * opt2.numFilters);
weight2Grad.addSum(weight2GradTmp, 0, 0, 1);
weight2Grad.reshape(opt2.numChannels * opt2.patchSize * opt2.patchSize, opt2.numFilters);
act2Grad.reshape(opt2.numFilters, opt2.outX * opt2.outX * batchSize);
bias2Grad.addSum(act2Grad, 1, 0, 1);
act2Grad.reshape(opt2.numFilters * opt2.outX * opt2.outX, batchSize);
convImgActs(act2Grad, weight2, act1PoolNormGrad, opt2.imSize, opt2.imSize, opt2.outX, opt2.paddingStart, 1, opt2.numChannels, 1);
// 1->0
//cout << "1->0\n";
// original part
act1PoolNormGrad.transpose(false);
act1PoolNorm.transpose(false);
convResponseNormCrossMapUndo(act1PoolNormGrad, act1Denom, act1Pool, act1PoolNorm, act1PoolGrad, opt1.numFilters, opt1.sizeF, opt1.addScale/opt1.sizeF, opt1.powScale, false, 0, 1);
convLocalMaxUndo(act1, act1PoolGrad, act1Pool, act1Grad, opt1.poolSize, opt1.poolStartX, opt1.poolStride, opt1.poolOutX);
act1Grad.applyBinary(ReluGradientOperator(), act1);
convWeightActs(*GPUTrain[batch], act1Grad, weight1GradTmp, opt1.imSize, opt1.outX, opt1.outX, opt1.patchSize, opt1.paddingStart, 1, opt1.numChannels, 1, opt1.partialSum);
weight1GradTmp.reshape(opt1.outX * opt1.outX / opt1.partialSum, opt1.numChannels * opt1.patchSize * opt1.patchSize * opt1.numFilters);
weight1Grad.addSum(weight1GradTmp, 0, 0, 1);
weight1Grad.reshape(opt1.numChannels * opt1.patchSize * opt1.patchSize, opt1.numFilters);
act1Grad.reshape(opt1.numFilters, opt1.outX * opt1.outX * batchSize);
bias1Grad.addSum(act1Grad, 1, 0, 1);
act1Grad.reshape(opt1.numFilters * opt1.outX * opt1.outX, batchSize);
// update
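// anneal the learning rate, ramp the momentum toward its maximum, then apply the SGD updates to every layer's weights and biases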
lr_scale = lrDecay(lr_scale, opt1.lrDecayType, opt1.lrDecayFactor, opt1.lrMinRate);
mom_scale = momInc(mom_scale, opt1.momIncType, opt1.momIncFactor, opt1.momMaxRate);
updateWeight(weight1Grad, weight1Inc, weight1, opt1, batchSize, lr_scale, mom_scale);
updateWeight(weight2Grad, weight2Inc, weight2, opt2, batchSize, lr_scale, mom_scale);
updateWeight(weight3Grad, weight3Inc, weight3, opt3, batchSize, lr_scale, mom_scale);
updateWeight(weight4Grad, weight4Inc, weight4, opt4, batchSize, lr_scale, mom_scale);
updateWeight(weightTopGrad, weightTopInc, weightTop, optTop, batchSize, lr_scale, mom_scale);
updateBias(bias1Grad, bias1Inc, bias1, opt1, batchSize, lr_scale, mom_scale);
updateBias(bias2Grad, bias2Inc, bias2, opt2, batchSize, lr_scale, mom_scale);
updateBias(bias3Grad, bias3Inc, bias3, opt3, batchSize, lr_scale, mom_scale);
updateBias(bias4Grad, bias4Inc, bias4, opt4, batchSize, lr_scale, mom_scale);
updateBias(biasTopGrad, biasTopInc, biasTop, optTop, batchSize, lr_scale, mom_scale);
} // for (int batch = 0; batch < GPUTrain.size(); batch++)
cudaThreadSynchronize();
cost /= CPUTrain.size();
cost1 /= CPUTrain.size();
printf("\nfinished epoch %d of %d; classify precision = %f; objective = %f; elapsed time = %f seconds\n", epoch, opt1.numEpochs,
cost, cost1, (float)(clock() - startClock)/CLOCKS_PER_SEC);
fprintf(pFile, "\nfinished epoch %d of %d; classify precision = %f; objective = %f; elapsed time = %f seconds\n", epoch, opt1.numEpochs,
cost, cost1, (float)(clock() - startClock)/CLOCKS_PER_SEC);
/*
weight1.apply(NVMatrixOps::Abs(), absM);
weightAbs1 = absM.sum() / absM.getNumElements();
weight2.apply(NVMatrixOps::Abs(), absM);
weightAbs2 = absM.sum() / absM.getNumElements();
weight3.apply(NVMatrixOps::Abs(), absM);
weightAbs3 = absM.sum() / absM.getNumElements();
weight4.apply(NVMatrixOps::Abs(), absM);
weightAbs4 = absM.sum() / absM.getNumElements();
weightTop.apply(NVMatrixOps::Abs(), absM);
weightAbsTop = absM.sum() / absM.getNumElements();
weight1Inc.apply(NVMatrixOps::Abs(), absM);
weightGradAbs1 = absM.sum() / absM.getNumElements();
weight2Inc.apply(NVMatrixOps::Abs(), absM);
weightGradAbs2 = absM.sum() / absM.getNumElements();
weight3Inc.apply(NVMatrixOps::Abs(), absM);
weightGradAbs3 = absM.sum() / absM.getNumElements();
weight4Inc.apply(NVMatrixOps::Abs(), absM);
weightGradAbs4 = absM.sum() / absM.getNumElements();
weightTopInc.apply(NVMatrixOps::Abs(), absM);
weightGradAbsTop = absM.sum() / absM.getNumElements();
bias1.apply(NVMatrixOps::Abs(), absM);
biasAbs1 = absM.sum() / absM.getNumElements();
bias2.apply(NVMatrixOps::Abs(), absM);
biasAbs2 = absM.sum() / absM.getNumElements();
bias3.apply(NVMatrixOps::Abs(), absM);
biasAbs3 = absM.sum() / absM.getNumElements();
bias4.apply(NVMatrixOps::Abs(), absM);
biasAbs4 = absM.sum() / absM.getNumElements();
biasTop.apply(NVMatrixOps::Abs(), absM);
biasAbsTop = absM.sum() / absM.getNumElements();
bias1Inc.apply(NVMatrixOps::Abs(), absM);
biasGradAbs1 = absM.sum() / absM.getNumElements();
bias2Inc.apply(NVMatrixOps::Abs(), absM);
biasGradAbs2 = absM.sum() / absM.getNumElements();
bias3Inc.apply(NVMatrixOps::Abs(), absM);
biasGradAbs3 = absM.sum() / absM.getNumElements();
bias4Inc.apply(NVMatrixOps::Abs(), absM);
biasGradAbs4 = absM.sum() / absM.getNumElements();
biasTopInc.apply(NVMatrixOps::Abs(), absM);
biasGradAbsTop = absM.sum() / absM.getNumElements();
printf("weight abs: 1--%f, 2--%f, 3--%f, 4--%f, top--%f\n", weightAbs1, weightAbs2, weightAbs3, weightAbs4, weightAbsTop);
printf("weight grad abs: 1--%f, 2--%f, 3--%f, 4--%f, top--%f\n", weightGradAbs1, weightGradAbs2, weightGradAbs3, weightGradAbs4, weightGradAbsTop);
printf("bias abs: 1--%f, 2--%f, 3--%f, 4--%f, top--%f\n", biasAbs1, biasAbs2, biasAbs3, biasAbs4, biasAbsTop);
printf("bias grad abs: 1--%f, 2--%f, 3--%f, 4--%f, top--%f\n", biasGradAbs1, biasGradAbs2, biasGradAbs3, biasGradAbs4, biasGradAbsTop);
fprintf(pFile, "weight abs: 1--%f, 2--%f, 3--%f, 4--%f, top--%f\n", weightAbs1, weightAbs2, weightAbs3, weightAbs4, weightAbsTop);
fprintf(pFile, "weight grad abs: 1--%f, 2--%f, 3--%f, 4--%f, top--%f\n", weightGradAbs1, weightGradAbs2, weightGradAbs3, weightGradAbs4, weightGradAbsTop);
fprintf(pFile, "bias abs: 1--%f, 2--%f, 3--%f, 4--%f, top--%f\n", biasAbs1, biasAbs2, biasAbs3, biasAbs4, biasAbsTop);
fprintf(pFile, "bias grad abs: 1--%f, 2--%f, 3--%f, 4--%f, top--%f\n", biasGradAbs1, biasGradAbs2, biasGradAbs3, biasGradAbs4, biasGradAbsTop);
*/
// process the test set every 3 epochs
if (epoch % 3 == 2) {
cudaThreadSynchronize();
startClock = clock();
cost = 0;
cost1 = 0;
for (int batch = 0; batch < GPUTest.size(); batch++) {
batchSize = GPUTest[batch]->getNumCols();
// ====forward pass====
// 0->1
//cout << "0->1\n";
//original
activateConv(*GPUTest[batch], act1, weight1, bias1, opt1);
act1.apply(ReluOperator());
act1Pool.transpose(false);
convLocalPool(act1, act1Pool, opt1.numFilters, opt1.poolSize, opt1.poolStartX, opt1.poolStride, opt1.poolOutX, MaxPooler());
convResponseNormCrossMap(act1Pool, act1Denom, act1PoolNorm, opt1.numFilters, opt1.sizeF, opt1.addScale/opt1.sizeF, opt1.powScale, false);
// 1->2
//cout << "1->2\n";
//original
activateConv(act1PoolNorm, act2, weight2, bias2, opt2);
act2.apply(ReluOperator());
convResponseNormCrossMap(act2, act2Denom, act2Norm, opt2.numFilters, opt2.sizeF, opt2.addScale/opt2.sizeF, opt2.powScale, false);
act2NormPool.transpose(false);
convLocalPool(act2Norm, act2NormPool, opt2.numFilters, opt2.poolSize, opt2.poolStartX, opt2.poolStride, opt2.poolOutX, MaxPooler());
// 2->3
//cout << "2->3\n";
// original
activateLocal(act2NormPool, act3, weight3, bias3, opt3);
act3.apply(ReluOperator());
// 3->4
//cout << "3->4\n";
// original
activateLocal(act3, act4, weight4, bias4, opt4);
act4.apply(ReluOperator());
// 4->top
//cout << "4->top\n";
actTop.transpose(true);
actTop.resize(batchSize, opt1.labelSize);
activate(act4, actTop, weightTop, biasTop, 0, 1);
//softmax layer
NVMatrix& max = actTop.max(1);
actTop.addVector(max, -1);
actTop.apply(NVMatrixOps::Exp());
NVMatrix& sum = actTop.sum(1);
actTop.eltwiseDivideByVector(sum);
delete &max;
delete &sum;
// compute cost
computeLogregSoftmaxGrad(*GPURawLabelTest[batch], actTop, actTopGrad, false, 1);
actTop.transpose(false);
computeLogregCost(*GPURawLabelTest[batch], actTop, trueLabelLogProbs, correctProbs); //labelLogProbs:(1, numCases); correctProbs:(1, numCases)
cost += correctProbs.sum() / batchSize;
cost1 += trueLabelLogProbs.sum() / batchSize;
} // for (int batch = 0; batch < GPUTest.size(); batch++)
cudaThreadSynchronize();
cost /= GPUTest.size();
cost1 /= GPUTest.size();
printf("\ntest set precision: %f\n; objective = %f; time elapsed = %f seconds\n", cost, cost1,
(float)(clock() - startClock)/CLOCKS_PER_SEC);
fprintf(pFile, "\ntest set precision: %f\n; objective = %f; time elapsed = %f seconds\n", cost, cost1,
(float)(clock() - startClock)/CLOCKS_PER_SEC);
// save checkpoint
char* weight1File = "/scratch0/qwang37/cifar-10-batches-bin/weight1.bin", *bias1File = "/scratch0/qwang37/cifar-10-batches-bin/bias1.bin";
char* weight2File = "/scratch0/qwang37/cifar-10-batches-bin/weight2.bin", *bias2File = "/scratch0/qwang37/cifar-10-batches-bin/bias2.bin";
char* weight3File = "/scratch0/qwang37/cifar-10-batches-bin/weight3.bin", *bias3File = "/scratch0/qwang37/cifar-10-batches-bin/bias3.bin";
char* weight4File = "/scratch0/qwang37/cifar-10-batches-bin/weight4.bin", *bias4File = "/scratch0/qwang37/cifar-10-batches-bin/bias4.bin";
char* weightTopFile = "/scratch0/qwang37/cifar-10-batches-bin/weightTop.bin", *biasTopFile = "/scratch0/qwang37/cifar-10-batches-bin/biasTop.bin";
NVSaveToFile(weight1, weight1File); NVSaveToFile(bias1, bias1File);
NVSaveToFile(weight2, weight2File); NVSaveToFile(bias2, bias2File);
NVSaveToFile(weight3, weight3File); NVSaveToFile(bias3, bias3File);
NVSaveToFile(weight4, weight4File); NVSaveToFile(bias4, bias4File);
NVSaveToFile(weightTop, weightTopFile); NVSaveToFile(biasTop, biasTopFile);
printf("Checkpoint saved!\n\n");
fprintf(pFile, "Checkpoint saved!\n\n");
} // if (epoch % 3 == 2)
} // for (int epoch = 0; epoch < opt1.numEpochs; epoch++)
printf("finetuning_rnorm() complete!\n");
fprintf(pFile, "finetuning_rnorm() complete!\n");
} // void finetune_rnorm()
void multiViewTest() {
////assignOpt();
printf("starting multiViewTest()!\n");
fprintf(pFile, "starting multiViewTest()!\n");
// initialize cublas
cudaSetDevice(cutGetMaxGflopsDeviceId());
cudaDeviceSetCacheConfig(cudaFuncCachePreferShared);
cublasInit();
// data and parameters storage
NVMatrix act1, act1Pool, act1PoolNorm, act1Denom;
NVMatrix act2, act2Norm, act2NormPool, act2Denom;
NVMatrix act3;
NVMatrix act4;
NVMatrix actTop;
NVMatrix softMaxAct;
NVMatrix weight1, weight2, weight3, weight4, weightTop;
NVMatrix bias1, bias2, bias3, bias4, biasTop; // bias4 is just an all-zero dummy vector
// initialize parameters
weight1.resize(opt1.numVis, opt1.numFilters);
weight2.resize(opt2.numVis, opt2.numFilters);
weight3.resize(opt3.numVis * opt3.outX * opt3.outX, opt3.numFilters);
weight4.resize(opt4.numVis * opt4.outX * opt4.outX, opt4.numFilters);
weightTop.resize(optTop.numVis, optTop.numFilters);
bias1.resize(opt1.numFilters, 1);
bias2.resize(opt2.numFilters, 1);
bias3.resize(opt3.numFilters * opt3.outX * opt3.outX, 1);
bias4.resize(opt4.numFilters * opt4.outX * opt4.outX, 1);
biasTop.resize(1, optTop.numFilters);
biasTop.setTrans(true);
NVReadFromFile(weight1, "/scratch0/qwang37/cifar-10-batches-bin/weight1.bin");
NVReadFromFile(weight2, "/scratch0/qwang37/cifar-10-batches-bin/weight2.bin");
NVReadFromFile(weight3, "/scratch0/qwang37/cifar-10-batches-bin/weight3.bin");
NVReadFromFile(weight4, "/scratch0/qwang37/cifar-10-batches-bin/weight4.bin");
NVReadFromFile(weightTop, "/scratch0/qwang37/cifar-10-batches-bin/weightTop.bin");
NVReadFromFile(bias1, "/scratch0/qwang37/cifar-10-batches-bin/bias1.bin");
NVReadFromFile(bias2, "/scratch0/qwang37/cifar-10-batches-bin/bias2.bin");
NVReadFromFile(bias3, "/scratch0/qwang37/cifar-10-batches-bin/bias3.bin");
NVReadFromFile(bias4, "/scratch0/qwang37/cifar-10-batches-bin/bias4.bin");
NVReadFromFile(biasTop, "/scratch0/qwang37/cifar-10-batches-bin/biasTop.bin");
// read data to host memory (and labels to the GPU memory)
int imPixels = 32*32*opt1.numChannels;
int batchSize = opt1.batchSize;
int testBatchNum = opt1.numTest / batchSize;
vector<Matrix*> CPUTest(testBatchNum);
vector<NVMatrix*> GPUTest(testBatchNum*opt1.numViews);
vector<NVMatrix*> GPURawLabelTest(testBatchNum);
// test set
batchSize = opt1.batchSize;
for (int batch = 0; batch < testBatchNum; batch++) {
CPUTest[batch] = new Matrix(imPixels, batchSize);
CPUTest[batch]->setTrans(false);
for (int r = 0; r < opt1.numViews; r++)
GPUTest[batch*opt1.numViews+r] = new NVMatrix();
hmReadFromFile(*CPUTest[batch], opt1.dataPath + "/cifar_raw.bin", opt1.numTrain+batch*batchSize);
GPURawLabelTest[batch] = new NVMatrix(1, batchSize);
GPURawLabelTest[batch]->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest[batch], opt1.dataPath + "/cifar_labels.bin", opt1.numTrain+batch*batchSize);
}
batchSize = opt1.numTest % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTest.push_back(new Matrix(imPixels, batchSize));
CPUTest.back()->setTrans(false);
for (int r = 0; r < opt1.numViews; r++)
GPUTest.push_back(new NVMatrix());
hmReadFromFile(*CPUTest.back(), opt1.dataPath + "/cifar_raw.bin", opt1.numTrain+testBatchNum*batchSize);
GPURawLabelTest.push_back(new NVMatrix(1, batchSize));
GPURawLabelTest.back()->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest.back(), opt1.dataPath + "/cifar_labels.bin", opt1.numTrain+testBatchNum*batchSize);
}
multiViewDataProvider(CPUTest, GPUTest, opt1, opt1.numViews, opt1.whitened); // copy data to the GPU side
NVMatrix trueLabelLogProbs;
NVMatrix correctProbs;
MTYPE cost; // as before, we trace the performance using the cost variable
MTYPE cost1;
clock_t startClock;
clock_t tick;
cost = 0;
cost1 = 0;
cudaThreadSynchronize();
startClock = clock();
for (int batch = 0; batch < CPUTest.size(); batch++) {
batchSize = CPUTest[batch]->getNumCols();
for (int r = 0; r < opt1.numViews; r++) {
// ====forward pass====
// 0->1
//cout << "0->1\n";
//original
activateConv(*GPUTest[batch*opt1.numViews+r], act1, weight1, bias1, opt1);
act1.apply(ReluOperator());
act1Pool.transpose(false);
convLocalPool(act1, act1Pool, opt1.numFilters, opt1.poolSize, opt1.poolStartX, opt1.poolStride, opt1.poolOutX, MaxPooler());
convResponseNormCrossMap(act1Pool, act1Denom, act1PoolNorm, opt1.numFilters, opt1.sizeF, opt1.addScale/opt1.sizeF, opt1.powScale, false);
// 1->2
//cout << "1->2\n";
//original
activateConv(act1PoolNorm, act2, weight2, bias2, opt2);
act2.apply(ReluOperator());
convResponseNormCrossMap(act2, act2Denom, act2Norm, opt2.numFilters, opt2.sizeF, opt2.addScale/opt2.sizeF, opt2.powScale, false);
act2NormPool.transpose(false);
convLocalPool(act2Norm, act2NormPool, opt2.numFilters, opt2.poolSize, opt2.poolStartX, opt2.poolStride, opt2.poolOutX, MaxPooler());
// 2->3
//cout << "2->3\n";
// original
activateLocal(act2NormPool, act3, weight3, bias3, opt3);
act3.apply(ReluOperator());
// 3->4
//cout << "3->4\n";
// original
activateLocal(act3, act4, weight4, bias4, opt4);
act4.apply(ReluOperator());
// 4->top
//cout << "4->top\n";
actTop.transpose(true);
actTop.resize(batchSize, opt1.labelSize);
activate(act4, actTop, weightTop, biasTop, 0, 1);
//softmax layer
NVMatrix& max = actTop.max(1);
actTop.addVector(max, -1);
actTop.apply(NVMatrixOps::Exp());
NVMatrix& sum = actTop.sum(1);
actTop.eltwiseDivideByVector(sum);
delete &max;
delete &sum;
actTop.transpose(false);
if (r == 0)
actTop.copy(softMaxAct);
else
softMaxAct.add(actTop);
}// for (r = 0:9)
softMaxAct.scale(0.1);
computeLogregCost(*GPURawLabelTest[batch], softMaxAct, trueLabelLogProbs, correctProbs); //labelLogProbs:(1, numCases); correctProbs:(1, numCases)
cost += correctProbs.sum();
cost1 += trueLabelLogProbs.sum();
}//for (batches)
cudaThreadSynchronize();
cost /= opt1.numTest;
cost1 /= opt1.numTest;
printf("\ntest set precision: %f\n; objective = %f; time elapsed = %f seconds\n", cost, cost1,
(float)(clock() - startClock)/CLOCKS_PER_SEC);
printf("multiViewTest() complete!\n");
fprintf(pFile, "\ntest set precision: %f\n; objective = %f; time elapsed = %f seconds\n", cost, cost1,
(float)(clock() - startClock)/CLOCKS_PER_SEC);
fprintf(pFile, "multiViewTest() complete!\n");
} // void multiViewTest()
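// The softmax block inside multiViewTest() is the standard numerically stable form:
// subtract the per-case max before exponentiating, normalize, then average the
// probabilities over the views (softMaxAct.scale(0.1) assumes 10 views). A minimal
// CPU sketch of that pattern, illustrative only (avgSoftmax is a hypothetical helper,
// not part of NVMatrix):
#include <vector>
#include <cmath>
#include <algorithm>
static std::vector<double> avgSoftmax(const std::vector< std::vector<double> > &viewLogits) {
    size_t n = viewLogits[0].size();
    std::vector<double> avg(n, 0.0);
    for (size_t v = 0; v < viewLogits.size(); v++) {
        double mx = *std::max_element(viewLogits[v].begin(), viewLogits[v].end());
        double sum = 0.0;
        std::vector<double> p(n);
        for (size_t j = 0; j < n; j++) { p[j] = std::exp(viewLogits[v][j] - mx); sum += p[j]; }
        for (size_t j = 0; j < n; j++) avg[j] += p[j] / sum; // per-view softmax probability
    }
    for (size_t j = 0; j < n; j++) avg[j] /= (double)viewLogits.size(); // average over views
    return avg;
}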
void testCropDataProvider() {
//assignOpt();
printf("starting test cropDataProvider()!\n");
// read data to host memory (and labels to the GPU memory)
int imPixels = 32*32*opt1.numChannels;
int batchSize = opt1.batchSize;
int trainBatchNum = opt1.numTrain / batchSize;
int testBatchNum = opt1.numTest / batchSize;
vector<Matrix*> CPUTrain(trainBatchNum), CPUTest(testBatchNum);
vector<NVMatrix*> GPUTrain(trainBatchNum), GPUTest(testBatchNum);
vector<NVMatrix*> GPURawLabelTrain(trainBatchNum), GPURawLabelTest(testBatchNum);
for (int batch = 0; batch < trainBatchNum; batch++) {
CPUTrain[batch] = new Matrix(imPixels, batchSize);
CPUTrain[batch]->setTrans(false);
GPUTrain[batch] = new NVMatrix();
hmReadFromFile(*CPUTrain[batch], "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", batch*batchSize);
GPURawLabelTrain[batch] = new NVMatrix(1, batchSize);
GPURawLabelTrain[batch]->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTrain[batch], "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", batch*batchSize);
}
batchSize = opt1.numTrain % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTrain.push_back(new Matrix(imPixels, batchSize));
CPUTrain.back()->setTrans(false);
GPUTrain.push_back(new NVMatrix());
hmReadFromFile(*CPUTrain.back(), "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", trainBatchNum*batchSize);
GPURawLabelTrain.push_back(new NVMatrix(1, batchSize));
GPURawLabelTrain.back()->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTrain.back(), "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", trainBatchNum*batchSize);
}
// test set
batchSize = opt1.batchSize;
for (int batch = 0; batch < testBatchNum; batch++) {
CPUTest[batch] = new Matrix(imPixels, batchSize);
CPUTest[batch]->setTrans(false);
GPUTest[batch] = new NVMatrix();
hmReadFromFile(*CPUTest[batch], "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", opt1.numTrain+batch*batchSize);
GPURawLabelTest[batch] = new NVMatrix(1, batchSize);
GPURawLabelTest[batch]->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest[batch], "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", opt1.numTrain+batch*batchSize);
}
batchSize = opt1.numTest % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTest.push_back(new Matrix(imPixels, batchSize));
CPUTest.back()->setTrans(false);
GPUTest.push_back(new NVMatrix());
hmReadFromFile(*CPUTest.back(), "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", opt1.numTrain+testBatchNum*batchSize);
GPURawLabelTest.push_back(new NVMatrix(1, batchSize));
GPURawLabelTest.back()->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest.back(), "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", opt1.numTrain+testBatchNum*batchSize);
}
cropDataProvider(CPUTrain, GPUTrain, opt1, false, false);
remove("/scratch0/qwang37/cifar-10-batches-bin/test_out.bin");
for (int batch = 0; batch < GPUTrain.size(); batch++) {
NVSaveToFile(*GPUTrain[batch], "/scratch0/qwang37/cifar-10-batches-bin/test_out.bin", true);
}
printf("cropDataProvider() test done!\n");
}
void testNVReadFromFileUint8() {
//assignOpt();
printf("starting test testNVReadFromFileUint8()!\n");
// read data to host memory (and labels to the GPU memory)
int imPixels = 32*32*opt1.numChannels;
int batchSize = opt1.batchSize;
int trainBatchNum = opt1.numTrain / batchSize;
int testBatchNum = opt1.numTest / batchSize;
vector<Matrix*> CPUTrain(trainBatchNum), CPUTest(testBatchNum);
vector<NVMatrix*> GPUTrain(trainBatchNum), GPUTest(testBatchNum);
vector<NVMatrix*> GPURawLabelTrain(trainBatchNum), GPURawLabelTest(testBatchNum);
for (int batch = 0; batch < trainBatchNum; batch++) {
CPUTrain[batch] = new Matrix(imPixels, batchSize);
CPUTrain[batch]->setTrans(false);
GPUTrain[batch] = new NVMatrix();
hmReadFromFile(*CPUTrain[batch], "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", batch*batchSize);
GPURawLabelTrain[batch] = new NVMatrix(1, batchSize);
GPURawLabelTrain[batch]->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTrain[batch], "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", batch*batchSize);
}
batchSize = opt1.numTrain % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTrain.push_back(new Matrix(imPixels, batchSize));
CPUTrain.back()->setTrans(false);
GPUTrain.push_back(new NVMatrix());
hmReadFromFile(*CPUTrain.back(), "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", trainBatchNum*batchSize);
GPURawLabelTrain.push_back(new NVMatrix(1, batchSize));
GPURawLabelTrain.back()->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTrain.back(), "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", trainBatchNum*batchSize);
}
// test set
batchSize = opt1.batchSize;
for (int batch = 0; batch < testBatchNum; batch++) {
CPUTest[batch] = new Matrix(imPixels, batchSize);
CPUTest[batch]->setTrans(false);
GPUTest[batch] = new NVMatrix();
hmReadFromFile(*CPUTest[batch], "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", opt1.numTrain+batch*batchSize);
GPURawLabelTest[batch] = new NVMatrix(1, batchSize);
GPURawLabelTest[batch]->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest[batch], "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", opt1.numTrain+batch*batchSize);
}
batchSize = opt1.numTest % opt1.batchSize; // the last batch
if (batchSize > 0) {
CPUTest.push_back(new Matrix(imPixels, batchSize));
CPUTest.back()->setTrans(false);
GPUTest.push_back(new NVMatrix());
hmReadFromFile(*CPUTest.back(), "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", opt1.numTrain+testBatchNum*batchSize);
GPURawLabelTest.push_back(new NVMatrix(1, batchSize));
GPURawLabelTest.back()->setTrans(false);
NVRawLabelReadFromFile(*GPURawLabelTest.back(), "/scratch0/qwang37/cifar-10-batches-bin/cifar_labels.bin", opt1.numTrain+testBatchNum*batchSize);
}
cropDataProvider(CPUTrain, GPUTrain, opt1, false, false);
remove("/scratch0/qwang37/cifar-10-batches-bin/test_out.bin");
for (int batch = 0; batch < GPUTrain.size(); batch++) {
NVSaveToFile(*GPUTrain[batch], "/scratch0/qwang37/cifar-10-batches-bin/test_out.bin", true);
}
printf("testNVReadFromFileUint8() test done!\n");
}
void centerData() {
ifstream in;
printf("starting centering data\n");
MTYPE* data = (MTYPE*) malloc(60000*3072*sizeof(MTYPE));
MTYPE* mean = (MTYPE*) malloc(3072*sizeof(MTYPE));
char* labels = (char*) malloc(60000*sizeof(char));
char dir_name[] = "/scratch0/qwang37/cifar-10-batches-bin/";
char file_name[] = "data_batch_1.bin";
char full_name[100];
int record_start;
for (int j = 0; j < 3072; j++)
mean[j] = 0.0;
for (int k = 1; k <= 5; k++) {
file_name[11] = '0' + k;
strcpy(full_name, dir_name);
strcat(full_name, file_name);
in.open(full_name, std::ifstream::in | std::ifstream::binary);
if (in.fail()) {
printf("open data file %d failed!\n", k);
exit(-1);
}
printf("reading batch %d\n", k);
for (int i = 0; i < 10000; i++) {
record_start = (k-1)*10000 + i;
labels[record_start] = in.get();
for (int j = 0; j < 3072; j++) {
data[record_start*3072+j] = MTYPE(in.get());
mean[j] += data[record_start*3072+j];
}
}
in.close();
}
char test_name[100];
strcpy(test_name, dir_name);
strcat(test_name, "test_batch.bin");
in.open(test_name, std::ifstream::in | std::ifstream::binary);
printf("reading test batch\n");
for (int i = 0; i < 10000; i++) {
record_start = 5*10000 + i;
labels[record_start] = in.get();
for (int j = 0; j < 3072; j++) {
data[record_start*3072+j] = (MTYPE)in.get();
//mean[j] += data[record_start*3072+j];
}
}
in.close();
/*
for (int j = 0; j < 3072; j++)
mean[j] /= 50000.0;
*/
ifstream in_mean;
in_mean.open("/scratch0/qwang37/cifar-10-batches-bin/data_mean.bin", std::ifstream::in | std::ifstream::binary);
if (in_mean.fail()) {
cout << "open file failed!\n";
return;
}
for (int j = 0; j < 3072; j++)
in_mean.read((char*)(mean+j), 4);
in_mean.close();
for (int i = 0; i < 60000; i++)
for (int j = 0; j < 3072; j++)
data[i*3072+j] -= mean[j];
//data[i*3072+j] -= 127.5;
ofstream out("/scratch0/qwang37/cifar-10-batches-bin/cifar_centered.bin", std::ofstream::out | std::ifstream::binary);
out.write((char*)data, 60000*3072*sizeof(MTYPE));
out.close();
}
void convertToMTYPE() {
ifstream in;
printf("starting copying data\n");
MTYPE* data = (MTYPE*) malloc(60000*3072*sizeof(MTYPE));
char* labels = (char*) malloc(60000*sizeof(char));
char dir_name[] = "/scratch0/qwang37/cifar-10-batches-bin/";
char file_name[] = "data_batch_1.bin";
char full_name[100];
int record_start;
printf("starting copy data\n");
for (int k = 1; k <= 5; k++) {
file_name[11] = '0' + k;
strcpy(full_name, dir_name);
strcat(full_name, file_name);
in.open(full_name, std::ifstream::in | std::ifstream::binary);
if (in.fail()) {
printf("open data file %d failed!\n", k);
exit(-1);
}
printf("reading batch %d\n", k);
for (int i = 0; i < 10000; i++) {
record_start = (k-1)*10000 + i;
labels[record_start] = in.get();
for (int j = 0; j < 3072; j++) {
data[record_start*3072+j] = MTYPE(in.get());
}
}
in.close();
}
char test_name[100];
strcpy(test_name, dir_name);
strcat(test_name, "test_batch.bin");
in.open(test_name, std::ifstream::in | std::ifstream::binary);
printf("reading test batch\n");
for (int i = 0; i < 10000; i++) {
record_start = 5*10000 + i;
labels[record_start] = in.get();
for (int j = 0; j < 3072; j++) {
data[record_start*3072+j] = (MTYPE)in.get();
}
}
in.close();
ofstream out("/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin", std::ofstream::out | std::ifstream::binary);
out.write((char*)data, 60000*3072*sizeof(MTYPE));
out.close();
}
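// Note on the input format consumed by centerData() and convertToMTYPE() above:
// these are the standard CIFAR-10 binary batches, i.e. each record is 1 label byte
// followed by 3072 pixel bytes (32x32x3), with 10000 records per batch file. Both
// functions simply widen the pixel bytes to MTYPE and write one contiguous
// 60000x3072 block of MTYPE values (cifar_centered.bin / cifar_raw.bin).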
void testAssembleMatrix() {
printf("start testAssembleMatrix()\n");
vector<NVMatrix> matrices(4);
Matrix tmp(2,3);
MTYPE* data = tmp.getData();
for (int i = 0; i < 4; i++)
matrices[i].resize(2,3);
// test1
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 6; j++)
data[j] = i * 6 + j;
tmp.setTrans(false);
matrices[i].copyFromHost(tmp, true);
}
NVMatrix rowFalse;
assembleNVMatrix(matrices, rowFalse, 0);
rowFalse.printShape("rowFalse");
rowFalse.print(8,3);
for (int i = 0; i < 4; i++) {
matrices[i].resize(0,0);
}
splitNVMatrix(matrices, rowFalse, 0);
char a[10];
for (int i = 0; i < 4; i++) {
sprintf(a, "rowFalse%d", i);
matrices[i].printShape(a);
matrices[i].print(2,3);
}
// test2
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 6; j++)
data[j] = i * 6 + j;
tmp.setTrans(true);
matrices[i].copyFromHost(tmp, true);
}
NVMatrix rowTrue;
assembleNVMatrix(matrices, rowTrue, 0);
rowTrue.printShape("rowTrue");
rowTrue.print(8,3);
for (int i = 0; i < 4; i++) {
matrices[i].resize(0,0);
}
splitNVMatrix(matrices, rowTrue, 0);
for (int i = 0; i < 4; i++) {
sprintf(a, "rowTrue%d", i);
matrices[i].printShape(a);
matrices[i].print(2,3);
}
// test3
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 6; j++)
data[j] = i * 6 + j;
tmp.setTrans(false);
matrices[i].copyFromHost(tmp, true);
}
NVMatrix colFalse;
assembleNVMatrix(matrices, colFalse, 1);
colFalse.printShape("colFalse");
colFalse.print(2,12);
for (int i = 0; i < 4; i++) {
matrices[i].resize(0,0);
}
splitNVMatrix(matrices, colFalse, 1);
for (int i = 0; i < 4; i++) {
sprintf(a, "colFalse%d", i);
matrices[i].printShape(a);
matrices[i].print(2,3);
}
// test4
for (int i = 0; i < 4; i++) {
for (int j = 0; j < 6; j++)
data[j] = i * 6 + j;
tmp.setTrans(true);
matrices[i].copyFromHost(tmp, true);
}
NVMatrix colTrue;
assembleNVMatrix(matrices, colTrue, 1);
colTrue.printShape("colTrue");
colTrue.print(2,12);
for (int i = 0; i < 4; i++) {
matrices[i].resize(0,0);
}
splitNVMatrix(matrices, colTrue, 1);
for (int i = 0; i < 4; i++) {
sprintf(a, "colTrue%d", i);
matrices[i].printShape(a);
matrices[i].print(2,3);
}
printf("finished testAssembleMatrix()\n");
}
void testAssembleMatrix1() {
printf("start testAssembleMatrix1()\n");
vector<NVMatrix> matrices(4);
NVMatrix mat(32*32*3, 60000);
mat.setTrans(false);
NVReadFromFile(mat, "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin");
splitNVMatrix(matrices, mat, 0);
for (int i = 0; i < 4; i++) {
char a[100];
sprintf(a, "/scratch0/qwang37/cifar-10-batches-bin/testAssemble/r%d.bin", i);
NVSaveToFile(matrices[i], a);
}
NVMatrix ass;
assembleNVMatrix(matrices, ass, 0);
NVSaveToFile(ass, "/scratch0/qwang37/cifar-10-batches-bin/testAssemble.bin");
printf("completed testAssembleMatrix1()!\n");
}
void testAssembleMatrix2() {
printf("start testAssembleMatrix2()\n");
vector<NVMatrix> sub(2);
NVMatrix mat(32*32*3, 60000);
mat.setTrans(false);
NVReadFromFile(mat, "/scratch0/qwang37/cifar-10-batches-bin/cifar_raw.bin");
splitNVMatrix(sub[0], sub[1], mat, 1000, 2072, 0);
for (int i = 0; i < 2; i++) {
char a[100];
sprintf(a, "/scratch0/qwang37/cifar-10-batches-bin/testAssemble/r%d.bin", i);
NVSaveToFile(sub[i], a);
}
NVMatrix ass;
assembleNVMatrix(sub[0], sub[1], ass, 0);
printf("finished assembleNVMatrix!\n");
NVSaveToFile(ass, "/scratch0/qwang37/cifar-10-batches-bin/rowAssemble.bin");
splitNVMatrix(sub[0], sub[1], mat, 20000, 40000, 1);
for (int i = 0; i < 2; i++) {
char a[100];
sprintf(a, "/scratch0/qwang37/cifar-10-batches-bin/testAssemble/c%d.bin", i);
NVSaveToFile(sub[i], a);
}
NVMatrix ass1;
assembleNVMatrix(sub[0], sub[1], ass1, 1);
NVSaveToFile(ass1, "/scratch0/qwang37/cifar-10-batches-bin/colAssemble.bin");
printf("completed testAssembleMatrix2()!\n");
}
void testGenFilterMask() {
curandState* devStates = init_cuda_rand(100);
printf("start testGenFilterMask()!\n");
NVMatrix a;
genFilterMask(a, 10, 10, 0.2, devStates);
a.printShape("a");
a.print(10,10);
}
void testAbs() {
NVMatrix a;
a.resize(16*10,10);
a.setTrans(false);
a.randomizeGaussian();
NVMatrix b;
NVMask maxMask;
convCompeteAbs(a, b, maxMask, 10, 2, 2, 4, 10);
b.print(16*10, 10);
}
|
4bf35c5de8f56ba9404c6d37137485aa08dfb13a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "net.h"
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
Net::Net(){
}
Net::Net(const vector<unsigned> &topology){
init(topology);
}
void Net::init(const vector<unsigned> &topology){
results_h = new double[10];
m_layers.clear();
unsigned numLayers = topology.size();
layers = topology.size();
for(unsigned layerNum = 0; layerNum < numLayers; ++layerNum){
m_layers.push_back(Layer());
//Now we fill the layer with neurons
//we loop <= since each layer has a bias neuron
unsigned numOutputs = layerNum == topology.size()-1 ? 0 : topology[layerNum+1];
for(unsigned neuronNum = 0; neuronNum<=topology[layerNum]; ++neuronNum){
//make a new Neuron
m_layers.back().push_back(Neuron(numOutputs, neuronNum));
}
//Force the bias node's output value to 1.0
m_layers.back().back().setOutputVal(1.0);
}
}
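// Illustrative usage sketch (a hypothetical helper, not called anywhere in this
// project): a topology of {784, 30, 10} gives an input layer of 784 neurons, one
// hidden layer of 30 and an output layer of 10, each extended with one bias neuron
// by init() above.
static void exampleNetUsage() {
    vector<unsigned> topology;
    topology.push_back(784);
    topology.push_back(30);
    topology.push_back(10);
    Net net(topology);
    vector<double> input(784, 0.5); // dummy pixel values in place of a real image
    vector<double> target(10, 0.0);
    target[3] = 1.0; // one-hot label for class 3
    net.feedForward(input);
    net.backProp(target);
    vector<double> results;
    net.getResults(results); // 10 output activations
}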
void Net::allocmemGPU(){
//hipMalloc((void*) *layers_d, sizeof(int));
hipMalloc((void**) &topology_d, sizeof(int)*layers);
int topology_h[layers];
int osize =0;
int wsize = 0;
for(int i=0; i<layers; i++){
topology_h[i] = m_layers[i].size();
osize+=m_layers[i].size();
}
hipMemcpy(topology_d,&topology_h, sizeof(int)*layers, hipMemcpyHostToDevice);
for(int l=0; l<layers; l++){
for(int n=0;n<topology_h[l];n++){
wsize+=m_layers[l][n].m_outputWeights.size();
}
}
double *weights_h = new double[wsize];
double *deltaweights_h = new double[wsize];
double *outputval_h = new double[osize];
int wcounter=0;
int lcounter=0;
for(int l=0; l<layers; l++){
for(int n=0;n<topology_h[l];n++){
for(int i =0;i<m_layers[l][n].m_outputWeights.size();i++){
weights_h[i+wcounter] = m_layers[l][n].m_outputWeights[i].weight;
deltaweights_h[i+wcounter] = m_layers[l][n].m_outputWeights[i].deltaWeight;
}
wcounter+=m_layers[l][n].m_outputWeights.size();
outputval_h[lcounter+n]=m_layers[l][n].m_outputVal;
}
lcounter+=topology_h[l];
}
hipMalloc((void**) &targetvals_d, sizeof(double)*10);
hipMalloc((void**) &weights_d, sizeof(double)*wsize);
hipMalloc((void**) &deltaweights_d, sizeof(double)*wsize);
hipMalloc((void**) &outputval_d, sizeof(double)*osize);
hipMalloc((void**) &gradients_d, sizeof(double)*osize);
hipMalloc((void**) &error_d, sizeof(int));
hipDeviceSynchronize();
hipMemcpy(weights_d,weights_h, sizeof(double)*wsize, hipMemcpyHostToDevice);
hipMemcpy(deltaweights_d,deltaweights_h, sizeof(double)*wsize, hipMemcpyHostToDevice);
hipMemcpy(outputval_d,outputval_h, sizeof(double)*osize, hipMemcpyHostToDevice);
hipDeviceSynchronize();
delete[] weights_h;
delete[] deltaweights_h;
delete[] outputval_h;
}
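// Memory layout note (derived from the copy loops above): outputval_d holds every
// neuron output laid out layer after layer (osize = sum of the layer sizes, bias
// neurons included), while weights_d / deltaweights_d hold, for each layer l and
// each neuron n in that layer, the neuron's outgoing weights as one contiguous run.
// wcounter and lcounter are the running offsets used to index into those flat arrays.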
void Net::deallocmemGPU(){
hipFree(weights_d);
hipFree(deltaweights_d);
hipFree(topology_d);
hipFree(outputval_d);
hipFree(gradients_d);
hipFree(error_d);
hipFree(targetvals_d);
}
void Net::copyGpuToCpu(){
//hipMemcpy(C_h, C_d, sizeof(float)*n, hipMemcpyDeviceToHost);
int topology_h[layers];
int osize =0;
for(int i=0; i<layers; i++){
osize+=m_layers[i].size();
}
hipMemcpy(topology_h, topology_d, sizeof(int)*layers, hipMemcpyDeviceToHost);
vector<unsigned> topology;
for(int i=0; i<layers; i++){
topology_h[i]--;
topology.push_back(topology_h[i]);
topology_h[i]++;
}
init(topology);
int wsize = 0;
for(int l=0; l<layers; l++){
for(int n=0;n<topology_h[l];n++){
wsize+=m_layers[l][n].m_outputWeights.size();
}
}
double *weights_h = new double[wsize];
double *deltaweights_h = new double[wsize];
double *outputval_h = new double[osize];
int wcounter=0;
int lcounter=0;
hipMemcpy(weights_h,weights_d, sizeof(double)*wsize, hipMemcpyDeviceToHost);
hipMemcpy(deltaweights_h,deltaweights_d, sizeof(double)*wsize, hipMemcpyDeviceToHost);
hipMemcpy(outputval_h,outputval_d, sizeof(double)*osize, hipMemcpyDeviceToHost);
hipDeviceSynchronize();
for(int l=0; l<layers; l++){
for(int n=0;n<topology_h[l];n++){
for(int i =0;i<m_layers[l][n].m_outputWeights.size();i++){
m_layers[l][n].m_outputWeights[i].weight = weights_h[i+wcounter];
m_layers[l][n].m_outputWeights[i].deltaWeight = deltaweights_h[i+wcounter];
}
wcounter+=m_layers[l][n].m_outputWeights.size();
m_layers[l][n].m_outputVal = outputval_h[lcounter+n];
}
lcounter+=topology_h[l];
}
delete[] weights_h;
delete[] deltaweights_h;
delete[] outputval_h;
}
/*Going to take a file, use a vector representation of that file, then create neurons like so.*/
/*File is going to be lengths separated by space, \n\n weights separated by \n\n,- error. */
/*Note: As of 11/18, we haven't implemented the error part yet in the file.*/
/*Returns -1 on error.*/
/*The "loader."*/
Net::Net(string filename)
{
FILE* fp;
long fsize;
//size_t result;
char* buf;
fp = fopen(filename.c_str(), "r");
/*Just some code to allocate and fill a buffer with the file contents.*/
fseek(fp, 0, SEEK_END);
fsize = ftell(fp);
rewind(fp);
buf = (char*)malloc(fsize*sizeof(char));
fread(buf, 1, fsize, fp);
fclose(fp);
char * initialbuf = buf;
unsigned numLayers;
/*This gets the number of layers based on the layout of the file, then creates an appropriate vector based on that size.*/
// memcpy(&(buf[0]), &numLayers, (buf - &(buf[0])));
memcpy(&numLayers, buf, sizeof(unsigned));
// cout << "numLayers:" << numLayers << endl;
buf += sizeof(unsigned);
// printf(buf, "test:%d\n", *buf);
char * layerVals = buf; /*How many elements are in the current layer. This is a pointer to it.*/
for(int i = 0; i < numLayers; i++)
{
buf += sizeof(int); /*Skip past all the layers to where the first piece of actual data is.*/
}
// memcpy((buf), &sum, ((buf)+sizeof(uint32_t) - &(buf));
// buf+= sizeof(uint32_t);
for(unsigned layerNum = 0; layerNum < numLayers; layerNum++)
{
m_layers.push_back(Layer());
double outputVal;
int outWeightssize;
vector<Connection> outputWeights;
unsigned myindex;
double gradient;
int sum;
memcpy(&sum, layerVals, sizeof(int));
// cout << "sum:" << sum << endl;
int counter=0;
while(counter != sum)
{
//Do memcpy + size of what you're trying to copy, + size of char for the space for each thing. 11/18.
// memcpy(buf, &outputVal, (sizeof(double)));
memcpy(&outputVal, buf, sizeof(double));
buf += sizeof(double);
memcpy(&outWeightssize, buf, sizeof(int));
buf = buf + sizeof(int);
// printf("outWeightssize:%d\n", outWeightssize);
for(int i = 0; i < outWeightssize; i++)
{
double tmp;
outputWeights.push_back(Connection());
memcpy(&tmp, buf, sizeof(double));
outputWeights.back().weight = tmp;
buf = buf + sizeof(double);
memcpy(&tmp, buf, sizeof(double));
outputWeights.back().deltaWeight = tmp;
buf = buf + sizeof(double);
cout << "Vals:" << "outWeightssize:"<< outWeightssize << " " << counter << " " << outputWeights.back().weight << " " << outputWeights.back().deltaWeight << endl;
}
memcpy(&myindex, buf, (sizeof(unsigned)));
buf = buf + sizeof(unsigned); // Go past myindex + ' '
memcpy(&gradient, buf, sizeof(double));
buf = buf + sizeof(double); // Go past gradient + ' '
m_layers.back().push_back(Neuron(outputVal, outputWeights, myindex, gradient)); // Invoke the constructor made that takes all the values as input. Might not be .back eventually.
outputWeights.clear();
// buf = buf + sizeof(unsigned); // Go past the newline.
// cout << "Into constructor:" << outputVal << " " << myindex << " " << gradient << endl;
counter++;
}
layerVals += sizeof(int);
}
// /*Skip to where the first neuron is.*/
// buf = strstr(buf, "\n\n");
// buf+=2; //Skip the newlines.
//I don't really get this part in the below ctor, so I'll just continue with what I think is correct.
free(initialbuf);
}
/*Takes in a filename. Will output num outputs - outputs - error onto the file.*/
/*Returns 0 on success, -1 on error.*/
/*The "saver."*/
int Net::outputToFile(string filename)
{
FILE* fp;
/*Assume valid filename*/
fp = fopen(filename.c_str(), "w");
if(!fp)
return -1;
vector<Layer>::iterator it;
vector<Neuron>::iterator iter;
vector<int> neuronSizes;
uint32_t sum=0;
//Get the size of all the neuron vectors.
for(it = m_layers.begin(); it != m_layers.end(); it++)
{
sum += it->size();
neuronSizes.push_back(it->size()); //This isn't used for now. Can probably be used for error-checking later.
}
//size of m_layers, then a space, then size of each neurons vector, then two newlines.
// fprintf(fp, "%zu' '", m_layers.size());
unsigned n_layers = m_layers.size();
cout << "Num_layers:" << n_layers << endl;
fwrite(&n_layers, sizeof(unsigned), 1, fp);
for(vector<int>::iterator i=neuronSizes.begin(); i!=neuronSizes.end();i++)
{
/*Put the size of each neuron vector into the file.*/
// fprintf(fp, "%d' '", &sum);
int size = *i;
printf("size:%d\n", size);
fwrite(&size, sizeof(int), 1, fp);
}
/*Separate the contents with two newlines.*/
// fprintf(fp, "\n\n");
//Iterate through layers
for(it = m_layers.begin(); it != m_layers.end(); it++)
{
//Iterate through neurons.
for(iter = it->begin(); iter != it->end(); iter++)
{
//Put the value of the neurons in the file.
// fprintf(fp, "%F' '", iter->m_outputVal);
fwrite(&(iter->m_outputVal), sizeof(double), 1, fp);
// fprintf(fp, "%d' '",iter->m_outputWeights.size()); // size of vector
int vecsize = iter->m_outputWeights.size();
fwrite(&vecsize, sizeof(int), 1, fp);
// int temp123;
// fseek(fp, -sizeof(int), SEEK_CUR);
// fread(&temp123, sizeof(int), 1, fp);
// printf("vecsize:%d\n",temp123);
// printf("vecsize:%d\n", vecsize);
for(vector<Connection>::iterator coni=iter->m_outputWeights.begin(); coni!=iter->m_outputWeights.end(); coni++)
{
// fprintf(fp, "%F' '%F' '", coni->weight, coni->deltaWeight); // vector contents
fwrite(&(coni->weight), sizeof(double), 1, fp);
fwrite(&(coni->deltaWeight), sizeof(double), 1, fp);
}
// fprintf(fp, "%u' '", iter->m_myIndex);
fwrite(&(iter->m_myIndex), sizeof(unsigned), 1, fp);
// fprintf(fp, "%F' '", iter->m_gradient);
fwrite(&(iter->m_gradient), sizeof(double), 1, fp);
/*I don't think these are needed*/
// fprintf(fp, "%F' '", iter->eta);
// fprintf(fp, "%F' '", iter->alpha);
/*Separate each neuron in a layer with a single newline.*/
// fprintf(fp, "\n");
}
//Separate each layer with two newlines.
// fprintf(fp, "\n\n");
}
//Eventually we'll want some error handling here. Otherwise, everything else should be good.
fclose(fp);
return 0; //I don't think anything here can really fail.
}
void Net::getResults(vector<double> &resultVals) const {
resultVals.clear();
for(unsigned n = 0; n < m_layers.back().size() - 1; ++n){
resultVals.push_back(m_layers.back()[n].getOutputVal());
}
}
void Net::feedForward(vector<double> &inputVals){
assert(inputVals.size()==m_layers[0].size() - 1);
//Latch the input vals into the input neurons
for(unsigned i= 0; i<inputVals.size(); ++i){
m_layers[0][i].setOutputVal(inputVals[i]);
}
//Forward Propagation
for(unsigned layerNum = 1; layerNum < m_layers.size(); ++layerNum){
Layer &prevLayer = m_layers[layerNum - 1];
for(unsigned n=0; n<m_layers[layerNum].size() - 1; ++n){
m_layers[layerNum][n].feedForward(prevLayer);
}
}
}
__global__ void latch(double * inputvals, double * nueronoutputvals){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<784){
nueronoutputvals[i]=inputvals[i];
}
}
__global__ void feedForwardkernel(double * weights,
double * nueronoutputvals,int *topology, int currlayer, int outoffset, int woffset){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
double sum = 0.0;
if(i<(topology[currlayer+1]-1))
{
for(unsigned n=0; n < topology[currlayer]; ++n){
//printf("Weight off %d\n",woffset);
sum += nueronoutputvals[outoffset+n] * weights[woffset + (n*(topology[currlayer+1]-1)) +i];
//prevLayer[n].getOutputVal() * prevLayer[n].m_outputWeights[m_myIndex].weight;
}
//printf("Thread %d Had a sum of %f\n",i,sum);
//__syncthreads();
sum/=(topology[currlayer]/2.0);
//printf("out off%d\n",outoffset);
//__syncthreads();
nueronoutputvals[outoffset+topology[currlayer]+i]=tanhf(sum);
}
}
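// Indexing note for feedForwardkernel(): the topology[] array on the device counts the
// bias neuron, so the next layer has fanOut = topology[currlayer+1]-1 real neurons.
// The weight from previous-layer neuron n to this-layer neuron i therefore lives at
// weights[woffset + n*fanOut + i]. For example, with a 784(+1 bias) input layer and a
// 30-neuron hidden layer: woffset = 0, fanOut = 30, and the weight from input neuron 2
// to hidden neuron 5 is weights[2*30 + 5].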
void Net::feedForwardParallel(double * invals){
double* invals_d;
hipMalloc((void**) &invals_d, sizeof(double)*784);
//hipDeviceSynchronize();
hipMemcpy(invals_d,invals, sizeof(double)*784, hipMemcpyHostToDevice);
hipDeviceSynchronize();
dim3 dim_block_latch(256,1,1);
dim3 dim_grid_latch(4,1,1);
//run a latch kernel
hipLaunchKernelGGL(( latch), dim3(dim_grid_latch),dim3(dim_block_latch), 0, 0, invals_d,outputval_d);
hipDeviceSynchronize();
hipFree(invals_d);
dim3 dim_block(512,1,1);
dim3 dim_grid(8,1,1);
int osize = 0;
int wsize = 0;
for(int i=0;i<layers-1;i++){
dim3 dim_block(512,1,1);
dim3 dim_grid((int)((m_layers[i+1].size()/512)+1),1,1);
//printf("Launching forward kernel\n");
hipLaunchKernelGGL(( feedForwardkernel), dim3(dim_grid), dim3(dim_block), 0, 0, weights_d, outputval_d ,topology_d, i, osize, wsize);
hipDeviceSynchronize();
osize+=m_layers[i].size();
wsize+=m_layers[i].size()*(m_layers[i+1].size()-1);
}
}
__global__ void getResultskernel(double * results, int outoffset, double* outputvals){
int tid = threadIdx.x;
if(tid<10){
results[tid] = outputvals[outoffset+tid];
}
}
void Net::getResultsFromGPU(){
//Can be stored so that this does not need to be recomputed
int osize = 0;
for(int i=0; i<layers-1; i++){
osize+=m_layers[i].size();
}
hipMalloc((void**) &results_d, sizeof(double)*10);
dim3 dim_block(16,1,1);
dim3 dim_grid(1,1,1);
hipLaunchKernelGGL(( getResultskernel), dim3(dim_grid), dim3(dim_block), 0, 0, results_d, osize, outputval_d);
hipDeviceSynchronize();
for(int i=0;i<10;i++){
results_h[i]=0.0;
}
hipMemcpy(results_h,results_d, sizeof(double)*10, hipMemcpyDeviceToHost);
hipFree(results_d);
}
__global__ void calcOutputGradientskernel(double * targetvals, double * outputvals,double * gradients, int outoffset){
int tid = threadIdx.x;
if(tid<10){
double delta =targetvals[tid] - outputvals[outoffset+tid];
gradients[outoffset+tid] = delta * (1.0 - (outputvals[outoffset+tid]*outputvals[outoffset+tid]));
}
}
__global__ void calcHiddenGradientskernel(double * weights,double * gradients, int outoffset,int woffset, int * topology, int currentlayer, double * outputvals){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < topology[currentlayer]){
double dow = 0.0;
for(int n=0; n< topology[currentlayer+1] - 1; ++n){
dow+=weights[woffset + (i*(topology[currentlayer+1]-1)) + n] * gradients[outoffset+topology[currentlayer]+n];
}
gradients[outoffset+i] = dow * (1.0 - (outputvals[outoffset+i]*outputvals[outoffset+i]));
gradients[outoffset+i] /= topology[currentlayer+1];
}
}
__global__ void updateInputWeightskernel(double * weights,double * gradients, double* outputvals, int woffset,
int outoffset, double * deltaweights, int *topology, int currlayer){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < topology[currlayer] - 1){
for(int n = 0; n < topology[currlayer-1]; ++n){
//Neuron &neuron = prevLayer[n];
//double oldDeltaWeight = neuron.m_outputWeights[m_myIndex].deltaWeight;
double newDeltaWeight =
//individual input , magnified by the gradient and train rate
.39
* outputvals[outoffset-topology[currlayer-1]+n]
* gradients[outoffset+i]
//Also add momentum = a fraction of the previous delta weight
+ .1
* deltaweights[woffset + (n*(topology[currlayer]-1)) +i];
deltaweights[woffset + (n*(topology[currlayer]-1)) +i] = newDeltaWeight;
weights[woffset + (n*(topology[currlayer]-1)) +i] += newDeltaWeight;
}
}
}
void Net::backPropParallel(double * targetvals){
hipMemcpy(targetvals_d,targetvals, sizeof(double)*10, hipMemcpyHostToDevice);
//calcoutput gradients
int osize = 0;
int wsize = 0;
int osize2 = 0;
int wsize2 = 0;
for(int i=0; i<layers-1; i++){
osize+=m_layers[i].size();
//wsize+=m_layers[i].size()*(m_layers[i+1].size()-1);
}
if(layers>2){
for(int i=0; i<layers-2; i++){
wsize+=m_layers[i].size()*(m_layers[i+1].size()-1);
osize2+=m_layers[i].size();
}
}
wsize2=wsize;
dim3 dim_block(16,1,1);
dim3 dim_grid(1,1,1);
hipLaunchKernelGGL(( calcOutputGradientskernel), dim3(dim_grid), dim3(dim_block), 0, 0, targetvals_d, outputval_d ,gradients_d, osize);
hipDeviceSynchronize();
//calc hidden gradients by going backwards through net
if(layers>2){
for(int l = layers - 2; l>0; --l){
dim3 dim_block(512,1,1);
dim3 dim_grid((int)((m_layers[l].size()/512)+1),1,1);
/*
printf("Calc Hidden Kernel Launch\n");
printf("The weight offset: %d\n" , wsize2);
printf("The output offset: %d\n" , osize2);
printf("The Current Layer: %d\n" , l);
*/
hipLaunchKernelGGL(( calcHiddenGradientskernel), dim3(dim_grid), dim3(dim_block), 0, 0, weights_d,gradients_d,osize2,wsize2,topology_d, l,outputval_d);
hipDeviceSynchronize();
osize2-=m_layers[l-1].size();
wsize2-=m_layers[l-1].size()*(m_layers[l].size()-1);
}
}
//update input weights
for(int l = layers - 1; l>0; --l){
dim3 dim_block(512,1,1);
dim3 dim_grid((int)((m_layers[l].size()/512)+1),1,1);
/*
printf("Update Inout Weights LAunch\n");
printf("The weight offset: %d\n" , wsize);
printf("The output offset: %d\n" , osize);
printf("The Current Layer: %d\n" , l);
*/
hipLaunchKernelGGL(( updateInputWeightskernel), dim3(dim_grid), dim3(dim_block), 0, 0, weights_d,gradients_d,outputval_d,wsize, osize, deltaweights_d,topology_d,l);
hipDeviceSynchronize();
osize-=m_layers[l-1].size();
if(l-2>=0)
wsize-=m_layers[l-2].size()*(m_layers[l-1].size()-1);
}
}
void Net::backProp(const vector<double> &targetVals){
//calculate overall Net error (RMS of output neuron errors)
assert(targetVals.size()==m_layers.back().size()-1);
Layer &outputLayer = m_layers.back();
m_error = 0.0;
for(unsigned n = 0; n< outputLayer.size() - 1; ++n){
double delta = targetVals[n] -outputLayer[n].getOutputVal();
m_error += delta*delta;
}
m_error /= outputLayer.size() - 1;
m_error = sqrt(m_error);
//Implement a recent average measurement
m_recentAverageError =
(m_recentAverageError * m_recentAverageSmoothingFactor + m_error)
/ (m_recentAverageSmoothingFactor + 1.0);
// Calculate output layer gradients
for(unsigned n = 0; n< outputLayer.size() - 1; ++n){
outputLayer[n].calcOutputGradients(targetVals[n]);
}
//calculate gradients on all hidden layers
for(unsigned layerNum = m_layers.size() - 2; layerNum>0; --layerNum){
Layer &hiddenLayer = m_layers[layerNum];
Layer &nextLayer = m_layers[layerNum + 1];
for(unsigned n = 0; n<hiddenLayer.size(); ++n){
hiddenLayer[n].calcHiddenGradients(nextLayer);
}
}
//From all layers from outputs to first hidden layer,
//update connection weights
for(unsigned layerNum = m_layers.size() - 1; layerNum > 0; --layerNum)
{
Layer &layer = m_layers[layerNum];
Layer &prevLayer = m_layers[layerNum - 1];
for(unsigned n=0; n<layer.size() - 1; ++n){
layer[n].updateInputWeights(prevLayer);
}
}
}
| 4bf35c5de8f56ba9404c6d37137485aa08dfb13a.cu | #include "net.h"
#include <stdint.h>
#include <stdlib.h>
#include <stdio.h>
Net::Net(){
}
Net::Net(const vector<unsigned> &topology){
init(topology);
}
void Net::init(const vector<unsigned> &topology){
results_h = new double[10];
m_layers.clear();
unsigned numLayers = topology.size();
layers = topology.size();
for(unsigned layerNum = 0; layerNum < numLayers; ++layerNum){
m_layers.push_back(Layer());
//Now we fill the layer with neurons
//we loop <= since each layer has a bias neuron
unsigned numOutputs = layerNum == topology.size()-1 ? 0 : topology[layerNum+1];
for(unsigned neuronNum = 0; neuronNum<=topology[layerNum]; ++neuronNum){
//make a new Neuron
m_layers.back().push_back(Neuron(numOutputs, neuronNum));
}
//Force the bias node's output value to 1.0
m_layers.back().back().setOutputVal(1.0);
}
}
void Net::allocmemGPU(){
//cudaMalloc((void*) *layers_d, sizeof(int));
cudaMalloc((void**) &topology_d, sizeof(int)*layers);
int topology_h[layers];
int osize =0;
int wsize = 0;
for(int i=0; i<layers; i++){
topology_h[i] = m_layers[i].size();
osize+=m_layers[i].size();
}
cudaMemcpy(topology_d,&topology_h, sizeof(int)*layers, cudaMemcpyHostToDevice);
for(int l=0; l<layers; l++){
for(int n=0;n<topology_h[l];n++){
wsize+=m_layers[l][n].m_outputWeights.size();
}
}
double *weights_h = new double[wsize];
double *deltaweights_h = new double[wsize];
double *outputval_h = new double[osize];
int wcounter=0;
int lcounter=0;
for(int l=0; l<layers; l++){
for(int n=0;n<topology_h[l];n++){
for(int i =0;i<m_layers[l][n].m_outputWeights.size();i++){
weights_h[i+wcounter] = m_layers[l][n].m_outputWeights[i].weight;
deltaweights_h[i+wcounter] = m_layers[l][n].m_outputWeights[i].deltaWeight;
}
wcounter+=m_layers[l][n].m_outputWeights.size();
outputval_h[lcounter+n]=m_layers[l][n].m_outputVal;
}
lcounter+=topology_h[l];
}
cudaMalloc((void**) &targetvals_d, sizeof(double)*10);
cudaMalloc((void**) &weights_d, sizeof(double)*wsize);
cudaMalloc((void**) &deltaweights_d, sizeof(double)*wsize);
cudaMalloc((void**) &outputval_d, sizeof(double)*osize);
cudaMalloc((void**) &gradients_d, sizeof(double)*osize);
cudaMalloc((void**) &error_d, sizeof(int));
cudaDeviceSynchronize();
cudaMemcpy(weights_d,weights_h, sizeof(double)*wsize, cudaMemcpyHostToDevice);
cudaMemcpy(deltaweights_d,deltaweights_h, sizeof(double)*wsize, cudaMemcpyHostToDevice);
cudaMemcpy(outputval_d,outputval_h, sizeof(double)*osize, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
delete[] weights_h;
delete[] deltaweights_h;
delete[] outputval_h;
}
void Net::deallocmemGPU(){
cudaFree(weights_d);
cudaFree(deltaweights_d);
cudaFree(topology_d);
cudaFree(outputval_d);
cudaFree(gradients_d);
cudaFree(error_d);
cudaFree(targetvals_d);
}
void Net::copyGpuToCpu(){
//cudaMemcpy(C_h, C_d, sizeof(float)*n, cudaMemcpyDeviceToHost);
int topology_h[layers];
int osize =0;
for(int i=0; i<layers; i++){
osize+=m_layers[i].size();
}
cudaMemcpy(topology_h, topology_d, sizeof(int)*layers, cudaMemcpyDeviceToHost);
vector<unsigned> topology;
for(int i=0; i<layers; i++){
topology_h[i]--;
topology.push_back(topology_h[i]);
topology_h[i]++;
}
init(topology);
int wsize = 0;
for(int l=0; l<layers; l++){
for(int n=0;n<topology_h[l];n++){
wsize+=m_layers[l][n].m_outputWeights.size();
}
}
double *weights_h = new double[wsize];
double *deltaweights_h = new double[wsize];
double *outputval_h = new double[osize];
int wcounter=0;
int lcounter=0;
cudaMemcpy(weights_h,weights_d, sizeof(double)*wsize, cudaMemcpyDeviceToHost);
cudaMemcpy(deltaweights_h,deltaweights_d, sizeof(double)*wsize, cudaMemcpyDeviceToHost);
cudaMemcpy(outputval_h,outputval_d, sizeof(double)*osize, cudaMemcpyDeviceToHost);
cudaDeviceSynchronize();
for(int l=0; l<layers; l++){
for(int n=0;n<topology_h[l];n++){
for(int i =0;i<m_layers[l][n].m_outputWeights.size();i++){
m_layers[l][n].m_outputWeights[i].weight = weights_h[i+wcounter];
m_layers[l][n].m_outputWeights[i].deltaWeight = deltaweights_h[i+wcounter];
}
wcounter+=m_layers[l][n].m_outputWeights.size();
m_layers[l][n].m_outputVal = outputval_h[lcounter+n];
}
lcounter+=topology_h[l];
}
delete[] weights_h;
delete[] deltaweights_h;
delete[] outputval_h;
}
/*Going to take a file, use a vector representation of that file, then create neurons like so.*/
/*File is going to be lengths separated by space, \n\n weights separated by \n\n,- error. */
/*Note: As of 11/18, we haven't implemented the error part yet in the file.*/
/*Returns -1 on error.*/
/*The "loader."*/
Net::Net(string filename)
{
FILE* fp;
long fsize;
//size_t result;
char* buf;
fp = fopen(filename.c_str(), "r");
/*Just some code to allocate and fill a buffer with the file contents.*/
fseek(fp, 0, SEEK_END);
fsize = ftell(fp);
rewind(fp);
buf = (char*)malloc(fsize*sizeof(char));
fread(buf, 1, fsize, fp);
fclose(fp);
char * initialbuf = buf;
unsigned numLayers;
/*This gets the number of layers based on the layout of the file, then creates an appropriate vector based on that size.*/
// memcpy(&(buf[0]), &numLayers, (buf - &(buf[0])));
memcpy(&numLayers, buf, sizeof(unsigned));
// cout << "numLayers:" << numLayers << endl;
buf += sizeof(unsigned);
// printf(buf, "test:%d\n", *buf);
char * layerVals = buf; /*How many elements are in the current layer. This is a pointer to it.*/
for(int i = 0; i < numLayers; i++)
{
buf += sizeof(int); /*Skip past all the layers to where the first piece of actual data is.*/
}
// memcpy((buf), &sum, ((buf)+sizeof(uint32_t) - &(buf));
// buf+= sizeof(uint32_t);
for(unsigned layerNum = 0; layerNum < numLayers; layerNum++)
{
m_layers.push_back(Layer());
double outputVal;
int outWeightssize;
vector<Connection> outputWeights;
unsigned myindex;
double gradient;
int sum;
memcpy(&sum, layerVals, sizeof(int));
// cout << "sum:" << sum << endl;
int counter=0;
while(counter != sum)
{
//Do memcpy + size of what you're trying to copy, + size of char for the space for each thing. 11/18.
// memcpy(buf, &outputVal, (sizeof(double)));
memcpy(&outputVal, buf, sizeof(double));
buf += sizeof(double);
memcpy(&outWeightssize, buf, sizeof(int));
buf = buf + sizeof(int);
// printf("outWeightssize:%d\n", outWeightssize);
for(int i = 0; i < outWeightssize; i++)
{
double tmp;
outputWeights.push_back(Connection());
memcpy(&tmp, buf, sizeof(double));
outputWeights.back().weight = tmp;
buf = buf + sizeof(double);
memcpy(&tmp, buf, sizeof(double));
outputWeights.back().deltaWeight = tmp;
buf = buf + sizeof(double);
cout << "Vals:" << "outWeightssize:"<< outWeightssize << " " << counter << " " << outputWeights.back().weight << " " << outputWeights.back().deltaWeight << endl;
}
memcpy(&myindex, buf, (sizeof(unsigned)));
buf = buf + sizeof(unsigned); // Go past myindex + ' '
memcpy(&gradient, buf, sizeof(double));
buf = buf + sizeof(double); // Go past gradient + ' '
m_layers.back().push_back(Neuron(outputVal, outputWeights, myindex, gradient)); // Invoke the constructor made that takes all the values as input. Might not be .back eventually.
outputWeights.clear();
// buf = buf + sizeof(unsigned); // Go past the newline.
// cout << "Into constructor:" << outputVal << " " << myindex << " " << gradient << endl;
counter++;
}
layerVals += sizeof(int);
}
// /*Skip to where the first neuron is.*/
// buf = strstr(buf, "\n\n");
// buf+=2; //Skip the newlines.
//I don't really get this part in the below ctor, so I'll just continue with what I think is correct.
free(initialbuf);
}
/*Takes in a filename. Will output num outputs - outputs - error onto the file.*/
/*Returns 0 on success, -1 on error.*/
/*The "saver."*/
int Net::outputToFile(string filename)
{
FILE* fp;
/*Assume valid filename*/
fp = fopen(filename.c_str(), "w");
if(!fp)
return -1;
vector<Layer>::iterator it;
vector<Neuron>::iterator iter;
vector<int> neuronSizes;
uint32_t sum=0;
//Get the size of all the neuron vectors.
for(it = m_layers.begin(); it != m_layers.end(); it++)
{
sum += it->size();
neuronSizes.push_back(it->size()); //This isn't used for now. Can probably be used for error-checking later.
}
//size of m_layers, then a space, then size of each neurons vector, then two newlines.
// fprintf(fp, "%zu' '", m_layers.size());
unsigned n_layers = m_layers.size();
cout << "Num_layers:" << n_layers << endl;
fwrite(&n_layers, sizeof(unsigned), 1, fp);
for(vector<int>::iterator i=neuronSizes.begin(); i!=neuronSizes.end();i++)
{
/*Put the size of each neuron vector into the file.*/
// fprintf(fp, "%d' '", &sum);
int size = *i;
printf("size:%d\n", size);
fwrite(&size, sizeof(int), 1, fp);
}
/*Separate the contents with two newlines.*/
// fprintf(fp, "\n\n");
//Iterate through layers
for(it = m_layers.begin(); it != m_layers.end(); it++)
{
//Iterate through neurons.
for(iter = it->begin(); iter != it->end(); iter++)
{
//Put the value of the neurons in the file.
// fprintf(fp, "%F' '", iter->m_outputVal);
fwrite(&(iter->m_outputVal), sizeof(double), 1, fp);
// fprintf(fp, "%d' '",iter->m_outputWeights.size()); // size of vector
int vecsize = iter->m_outputWeights.size();
fwrite(&vecsize, sizeof(int), 1, fp);
// int temp123;
// fseek(fp, -sizeof(int), SEEK_CUR);
// fread(&temp123, sizeof(int), 1, fp);
// printf("vecsize:%d\n",temp123);
// printf("vecsize:%d\n", vecsize);
for(vector<Connection>::iterator coni=iter->m_outputWeights.begin(); coni!=iter->m_outputWeights.end(); coni++)
{
// fprintf(fp, "%F' '%F' '", coni->weight, coni->deltaWeight); // vector contents
fwrite(&(coni->weight), sizeof(double), 1, fp);
fwrite(&(coni->deltaWeight), sizeof(double), 1, fp);
}
// fprintf(fp, "%u' '", iter->m_myIndex);
fwrite(&(iter->m_myIndex), sizeof(unsigned), 1, fp);
// fprintf(fp, "%F' '", iter->m_gradient);
fwrite(&(iter->m_gradient), sizeof(double), 1, fp);
/*I don't think these are needed*/
// fprintf(fp, "%F' '", iter->eta);
// fprintf(fp, "%F' '", iter->alpha);
/*Separate each neuron in a layer with a single newline.*/
// fprintf(fp, "\n");
}
//Separate each layer with two newlines.
// fprintf(fp, "\n\n");
}
//Eventually we'll want some error handling here. Otherwise, everything else should be good.
fclose(fp);
return 0; //I don't think anything here can really fail.
}
void Net::getResults(vector<double> &resultVals) const {
resultVals.clear();
for(unsigned n = 0; n < m_layers.back().size() - 1; ++n){
resultVals.push_back(m_layers.back()[n].getOutputVal());
}
}
void Net::feedForward(vector<double> &inputVals){
assert(inputVals.size()==m_layers[0].size() - 1);
//Latch the input vals into the input neurons
for(unsigned i= 0; i<inputVals.size(); ++i){
m_layers[0][i].setOutputVal(inputVals[i]);
}
//Forward Propagation
for(unsigned layerNum = 1; layerNum < m_layers.size(); ++layerNum){
Layer &prevLayer = m_layers[layerNum - 1];
for(unsigned n=0; n<m_layers[layerNum].size() - 1; ++n){
m_layers[layerNum][n].feedForward(prevLayer);
}
}
}
__global__ void latch(double * inputvals, double * nueronoutputvals){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i<784){
nueronoutputvals[i]=inputvals[i];
}
}
__global__ void feedForwardkernel(double * weights,
double * nueronoutputvals,int *topology, int currlayer, int outoffset, int woffset){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
double sum = 0.0;
if(i<(topology[currlayer+1]-1))
{
for(unsigned n=0; n < topology[currlayer]; ++n){
//printf("Weight off %d\n",woffset);
sum += nueronoutputvals[outoffset+n] * weights[woffset + (n*(topology[currlayer+1]-1)) +i];
//prevLayer[n].getOutputVal() * prevLayer[n].m_outputWeights[m_myIndex].weight;
}
//printf("Thread %d Had a sum of %f\n",i,sum);
//__syncthreads();
sum/=(topology[currlayer]/2.0);
//printf("out off%d\n",outoffset);
//__syncthreads();
nueronoutputvals[outoffset+topology[currlayer]+i]=tanhf(sum);
}
}
void Net::feedForwardParallel(double * invals){
double* invals_d;
cudaMalloc((void**) &invals_d, sizeof(double)*784);
//cudaDeviceSynchronize();
cudaMemcpy(invals_d,invals, sizeof(double)*784, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
dim3 dim_block_latch(256,1,1);
dim3 dim_grid_latch(4,1,1);
//run a latch kernel
latch<<<dim_grid_latch,dim_block_latch>>>(invals_d,outputval_d);
cudaDeviceSynchronize();
cudaFree(invals_d);
dim3 dim_block(512,1,1);
dim3 dim_grid(8,1,1);
int osize = 0;
int wsize = 0;
for(int i=0;i<layers-1;i++){
dim3 dim_block(512,1,1);
dim3 dim_grid((int)((m_layers[i+1].size()/512)+1),1,1);
//printf("Launching forward kernel\n");
feedForwardkernel<<<dim_grid, dim_block>>>(weights_d, outputval_d ,topology_d, i, osize, wsize);
cudaDeviceSynchronize();
osize+=m_layers[i].size();
wsize+=m_layers[i].size()*(m_layers[i+1].size()-1);
}
}
__global__ void getResultskernel(double * results, int outoffset, double* outputvals){
int tid = threadIdx.x;
if(tid<10){
results[tid] = outputvals[outoffset+tid];
}
}
void Net::getResultsFromGPU(){
//Can be stored so that this does not need to be recomputed
int osize = 0;
for(int i=0; i<layers-1; i++){
osize+=m_layers[i].size();
}
cudaMalloc((void**) &results_d, sizeof(double)*10);
dim3 dim_block(16,1,1);
dim3 dim_grid(1,1,1);
getResultskernel<<<dim_grid, dim_block>>>(results_d, osize, outputval_d);
cudaDeviceSynchronize();
for(int i=0;i<10;i++){
results_h[i]=0.0;
}
cudaMemcpy(results_h,results_d, sizeof(double)*10, cudaMemcpyDeviceToHost);
cudaFree(results_d);
}
__global__ void calcOutputGradientskernel(double * targetvals, double * outputvals,double * gradients, int outoffset){
int tid = threadIdx.x;
if(tid<10){
double delta =targetvals[tid] - outputvals[outoffset+tid];
gradients[outoffset+tid] = delta * (1.0 - (outputvals[outoffset+tid]*outputvals[outoffset+tid]));
}
}
__global__ void calcHiddenGradientskernel(double * weights,double * gradients, int outoffset,int woffset, int * topology, int currentlayer, double * outputvals){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < topology[currentlayer]){
double dow = 0.0;
for(int n=0; n< topology[currentlayer+1] - 1; ++n){
dow+=weights[woffset + (i*(topology[currentlayer+1]-1)) + n] * gradients[outoffset+topology[currentlayer]+n];
}
gradients[outoffset+i] = dow * (1.0 - (outputvals[outoffset+i]*outputvals[outoffset+i]));
gradients[outoffset+i] /= topology[currentlayer+1];
}
}
__global__ void updateInputWeightskernel(double * weights,double * gradients, double* outputvals, int woffset,
int outoffset, double * deltaweights, int *topology, int currlayer){
unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
if(i < topology[currlayer] - 1){
for(int n = 0; n < topology[currlayer-1]; ++n){
//Neuron &neuron = prevLayer[n];
//double oldDeltaWeight = neuron.m_outputWeights[m_myIndex].deltaWeight;
double newDeltaWeight =
//individual input , magnified by the gradient and train rate
.39
* outputvals[outoffset-topology[currlayer-1]+n]
* gradients[outoffset+i]
//Also add momentum = a fraction of the previous delta weight
+ .1
* deltaweights[woffset + (n*(topology[currlayer]-1)) +i];
deltaweights[woffset + (n*(topology[currlayer]-1)) +i] = newDeltaWeight;
weights[woffset + (n*(topology[currlayer]-1)) +i] += newDeltaWeight;
}
}
}
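// The per-weight rule above is a plain gradient step with momentum:
// delta_new = eta * x_prev * gradient_i + alpha * delta_old, then w += delta_new,
// with eta = .39 and alpha = .1 hardcoded in the kernel. A scalar host-side sketch of
// the same rule, illustrative only (momentumStep is a hypothetical helper):
static double momentumStep(double w, double &delta, double xPrev, double grad,
                           double eta = 0.39, double alpha = 0.1) {
    delta = eta * xPrev * grad + alpha * delta; // new delta-weight
    return w + delta;                           // updated weight
}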
void Net::backPropParallel(double * targetvals){
cudaMemcpy(targetvals_d,targetvals, sizeof(double)*10, cudaMemcpyHostToDevice);
//calcoutput gradients
int osize = 0;
int wsize = 0;
int osize2 = 0;
int wsize2 = 0;
for(int i=0; i<layers-1; i++){
osize+=m_layers[i].size();
//wsize+=m_layers[i].size()*(m_layers[i+1].size()-1);
}
if(layers>2){
for(int i=0; i<layers-2; i++){
wsize+=m_layers[i].size()*(m_layers[i+1].size()-1);
osize2+=m_layers[i].size();
}
}
wsize2=wsize;
dim3 dim_block(16,1,1);
dim3 dim_grid(1,1,1);
calcOutputGradientskernel<<<dim_grid, dim_block>>>(targetvals_d, outputval_d ,gradients_d, osize);
cudaDeviceSynchronize();
//calc hidden gradients by going backwards through net
if(layers>2){
for(int l = layers - 2; l>0; --l){
dim3 dim_block(512,1,1);
dim3 dim_grid((int)((m_layers[l].size()/512)+1),1,1);
/*
printf("Calc Hidden Kernel Launch\n");
printf("The weight offset: %d\n" , wsize2);
printf("The output offset: %d\n" , osize2);
printf("The Current Layer: %d\n" , l);
*/
calcHiddenGradientskernel<<<dim_grid, dim_block>>>(weights_d,gradients_d,osize2,wsize2,topology_d, l,outputval_d);
cudaDeviceSynchronize();
osize2-=m_layers[l-1].size();
wsize2-=m_layers[l-1].size()*(m_layers[l].size()-1);
}
}
//update input weights
for(int l = layers - 1; l>0; --l){
dim3 dim_block(512,1,1);
dim3 dim_grid((int)((m_layers[l].size()/512)+1),1,1);
/*
printf("Update Inout Weights LAunch\n");
printf("The weight offset: %d\n" , wsize);
printf("The output offset: %d\n" , osize);
printf("The Current Layer: %d\n" , l);
*/
updateInputWeightskernel<<<dim_grid, dim_block>>>(weights_d,gradients_d,outputval_d,wsize, osize, deltaweights_d,topology_d,l);
cudaDeviceSynchronize();
osize-=m_layers[l-1].size();
if(l-2>=0)
wsize-=m_layers[l-2].size()*(m_layers[l-1].size()-1);
}
}
void Net::backProp(const vector<double> &targetVals){
//calculate overall Net error (RMS of output neuron errors)
assert(targetVals.size()==m_layers.back().size()-1);
Layer &outputLayer = m_layers.back();
m_error = 0.0;
for(unsigned n = 0; n< outputLayer.size() - 1; ++n){
double delta = targetVals[n] -outputLayer[n].getOutputVal();
m_error += delta*delta;
}
m_error /= outputLayer.size() - 1;
m_error = sqrt(m_error);
//Implement a recent average measurement
m_recentAverageError =
(m_recentAverageError * m_recentAverageSmoothingFactor + m_error)
/ (m_recentAverageSmoothingFactor + 1.0);
// Calculate output layer gradients
for(unsigned n = 0; n< outputLayer.size() - 1; ++n){
outputLayer[n].calcOutputGradients(targetVals[n]);
}
//calculate gradients on all hidden layers
for(unsigned layerNum = m_layers.size() - 2; layerNum>0; --layerNum){
Layer &hiddenLayer = m_layers[layerNum];
Layer &nextLayer = m_layers[layerNum + 1];
for(unsigned n = 0; n<hiddenLayer.size(); ++n){
hiddenLayer[n].calcHiddenGradients(nextLayer);
}
}
//From all layers from outputs to first hidden layer,
//update connection weights
for(unsigned layerNum = m_layers.size() - 1; layerNum > 0; --layerNum)
{
Layer &layer = m_layers[layerNum];
Layer &prevLayer = m_layers[layerNum - 1];
for(unsigned n=0; n<layer.size() - 1; ++n){
layer[n].updateInputWeights(prevLayer);
}
}
}
|
aa786c563ccedb82a9af7a4bc1724d4bc9f7ca60.hip | // !!! This is a file automatically generated by hipify!!!
//
// techniques.cpp
// Coordinate_descent
//
// Created by Huawei on 04/07/18.
// Copyright 2015 Zhiwei Fan. All rights reserved.
//
#include <hip/hip_runtime.h>
#include <iostream>
#include <fstream>
#include <cmath>
#include <ctime>
#include <stdlib.h>
#include <string.h>
#include "techniques.h"
#include "DataManagement.h"
#include "gradientkl.cu"
#include "linear_models.h"
// #include <hipcub/hipcub.hpp>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
techniques::techniques(){};
/**
Coordinate Descent/Block Coordinate Descent:
(CD/BCD)
Materialize, Stream, Factorize
Stochastic Gradient Descent/Batch Gradient Descent:
(SGD/BGD)
Materialize only
*/
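// A minimal host-side sketch of one stochastic coordinate descent epoch for least
// squares, to make the structure of materialize() below easier to follow. Illustrative
// only, and it assumes a column-major X (one feature column of length n at a time,
// mirroring how columns are fetched below); H caches the current prediction X*w so that
// each coordinate step only has to touch a single column.
static void scdEpochLeastSquares(const double *X, const double *Y, double *H,
                                 double *w, long n, int d, double step)
{
    for (int j = 0; j < d; j++) {
        const double *Xj = X + (long)j * n;
        double grad = 0.0;
        for (long k = 0; k < n; k++)
            grad += (H[k] - Y[k]) * Xj[k]; // partial gradient of the squared loss w.r.t. w_j
        double oldWj = w[j];
        w[j] -= step * (grad / n); // coordinate update
        double diff = w[j] - oldWj;
        for (long k = 0; k < n; k++)
            H[k] += diff * Xj[k]; // keep H = X*w in sync with the updated coordinate
    }
}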
#pragma mark - Stochastic Coordinate Descent
/**
Stochastic Coordinate Descent - Materialize
@param table_T The output table results from the join of the "entity" table S and the "attribute" table R
@param _setting
@param model
@param avail_mem The available memory measured by "sizeof(double)"
*/
// Note: the choice of computing on the CPU or the GPU should also take the available GPU memory into consideration.
// Currently this mirrors the CPU calculation; if the data does not fit in CPU memory, it certainly cannot be processed on the GPU either.
void techniques::materialize(string table_T, setting _setting, double *&model, double avail_mem, const char *lm)
{
// Object for reading data into memory
DataManagement DM;
DM.message("Start materialize");
// Set Timer
clock_t c_start;
clock_t c_end;
// Get the table information and column names
vector<long> tableInfo(3);
vector<string> fields = DM.getFieldNames(table_T, tableInfo);
int feature_num = (int)tableInfo[1];
long row_num = (long)tableInfo[2];
// For cache, avail_mem_total in Bytes, avail_mem in GB
double avail_mem_total = 1024*1024*1024*avail_mem;
int avail_col = 0;
int avail_cache = 0;
double *cache;
// Primary main memory space: three columns
// At any time there must be space for the label Y, one feature column X, and the residual H
// Label array
double *Y;
// Residual vector
double *H;
// Buffer for 1 column reading
double *X;
// Setting
double step_size = _setting.step_size;
// Calculate the available memory measured by size of a single column, total available column
avail_col = avail_mem_total/(sizeof(double)*row_num);
// Calculate the available remaining space measured by size of a single column for cache
avail_cache = avail_col - 3;
if(avail_cache < 0)
{
DM.errorMessage("Insufficient memory space for training");
exit(1);
}
else if (avail_cache == 0)
{
DM.message("No space for caching");
}
else
{
// can cache all feature column
if( avail_cache >= feature_num - 1 )
{
// cache = new double*[feature_num];
// for(int i = 0; i < feature_num; i ++)
// {
// cache[i] = new double[row_num];
// }
cache = (double*)malloc(sizeof(double)*feature_num*row_num);
// No need to reserve the X buffer to read single column, all in cache
avail_cache = feature_num;
}
else
{
cache = (double*)malloc(sizeof(double)*avail_cache*row_num);
}
}
// Dynamic memory allocation
if(avail_cache < feature_num)
{
// Allocate the memory to X
X = (double*)malloc(sizeof(double)*row_num);
}
Y = (double*)malloc(sizeof(double)*row_num);
H = (double*)malloc(sizeof(double)*row_num);
model = (double*)malloc(sizeof(double)*feature_num);
// Dynamic allocation for device variables
// H and Y definitely need to be allocated & cached
double* dH;
double* dY;
double* dX;
double* cuda_cache;
double* dmul;
// double* mul;
// mul = (double*)malloc(sizeof(double)*row_num);
size_t pitch;
if(avail_cache < feature_num) {
if(hipSuccess != hipMalloc((void**)&dX, row_num * sizeof(double))) {
DM.message("No space on device for single column feature");
exit(1);
}
}
if(hipSuccess != hipMalloc((void**)&dY, row_num * sizeof(double))) {
DM.message("No space on device for class labels");
exit(1);
}
if(hipSuccess != hipMalloc((void**)&dH, row_num * sizeof(double))) {
DM.message("No space on device for remain variables");
exit(1);
}
if(hipSuccess != hipMalloc((void**)&dmul, row_num * sizeof(double))) {
DM.message("No space on device for Intermediate variables");
exit(1);
}
// Actually need to consider the extra variable malloc for reducing
// May need further calculation for cache in the future
printf("avaiable cache number is %d\n", avail_cache);
if(avail_cache > 0) {
if(hipSuccess != hipMallocPitch((void**)&cuda_cache, &pitch, row_num * sizeof(double), avail_cache)) {
DM.message("No space on device for variable cache");
} else {
DM.message("Allocate GPU-Cache successfully on GPU");
printf("Malloc width after padding is %zu bytes\n", pitch);
}
}
// double* dF_partial;
// hipMalloc((void**)&dF_partial, sizeof(double));
// double* dF;
// hipMalloc((void**)&dF, sizeof(double));
// void *d_temp_storage = NULL;
// size_t temp_storage_bytes = 0;
// hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, dmul, dF_partial, row_num);
// hipMalloc((void**)&d_temp_storage, temp_storage_bytes);
DM.fetchColumn(fields[1], row_num, Y);
hipMemcpyAsync(dY, Y, row_num*sizeof(double), hipMemcpyHostToDevice);
hipMemsetAsync(dH, 0, row_num*sizeof(double));
printf("model used %s\n", lm);
// Caching & copy data for training
printf("\n");
printf("Avail_col: %d\n", avail_cache);
for(int i = 0; i < avail_cache; i ++)
{
DM.fetchColumn(fields[i+2], row_num, cache + i*row_num);
}
// printf("Test cache data is %lf\n", *(cache + (avail_cache - 1)*row_num));
// Only issue the 2D copy when something was actually cached (cuda_cache and pitch are uninitialized otherwise)
if(avail_cache > 0) {
if(hipSuccess != hipMemcpy2DAsync(cuda_cache, pitch, cache, row_num*sizeof(double), row_num*sizeof(double), avail_cache, hipMemcpyHostToDevice)) {
DM.message("Not enough space on GPU memory for caching features");
} else {
DM.message("Cached feature columns copied to GPU memory");
}
}
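// Wrap the raw device pointer so thrust::reduce can sum the per-row products in dmul directly on the GPU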
thrust::device_ptr<double> wrapped_ptr = thrust::device_pointer_cast(dmul);
// Initialization of variables for loss and gradient
// Transfer data between GPU & CPU, dY label & dH always remains in GPU memory
double F = 0.00;
double F_partial = 0.00;
double r_curr = 0.00;
double r_prev = 0.00;
int iters = 0;
memset(model, 0, sizeof(double)*feature_num);
// Kernel launch parameters
const int threadsPerBlock = 1024;
const int blocksPerGrid = row_num/threadsPerBlock + 1;
// Shuffling process
vector<int> original_index_set;
vector<int> shuffling_index;
for(long i = 0; i < feature_num; i ++)
{
original_index_set.push_back(i);
}
shuffling_index = shuffle(original_index_set, (unsigned)time(NULL));
shuffling_index = original_index_set;
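// Note: the shuffled order is overwritten here, so coordinates are visited in their original order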
// Training loop; note that the per-coordinate branching below may limit performance
do {
c_start = clock();
// Update one coordinate each time
for(int j = 0; j < feature_num; j ++)
{
int cur_index = shuffling_index.at(j);
F_partial = 0.00;
// hipMemsetAsync(dF_partial, 0, sizeof(double));
hipDeviceSynchronize();
// If the column corresponding to the current updating coordinate is in the cache, no extra I/O is needed
if(cur_index < avail_cache)
{
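// The column is cached on the device: launch the model-specific gradient kernel against row cur_index of the pitched cache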
if(strcmp("lr", lm) == 0)
hipLaunchKernelGGL(( G_lrcache), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dY, dH, cuda_cache, dmul, cur_index, row_num, pitch);
else if(strcmp("lsr", lm) == 0)
hipLaunchKernelGGL(( G_lsrcache), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dY, dH, cuda_cache, dmul, cur_index, row_num, pitch);
else if(strcmp("lsvm", lm) == 0)
hipLaunchKernelGGL(( G_svmcache), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dY, dH, cuda_cache, dmul, cur_index, row_num, pitch);
hipDeviceSynchronize();
F_partial = thrust::reduce(thrust::device, wrapped_ptr, wrapped_ptr + row_num);
// hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, dmul, dF_partial, row_num);
// hipMemcpy(&F_partial, dF_partial,sizeof(double),hipMemcpyDeviceToHost);
// hipMemcpy(mul, dmul, sizeof(double)*row_num, hipMemcpyDeviceToHost);
// #pragma omp parallel for reduction (+:F_partial)
// for(long k = 0; k < row_num; k++)
// F_partial += mul[k];
// printf("F_partial value is %lf\n", F_partial);
}
else
{
// Fetch the column and store the current column into X, not in cache
DM.fetchColumn(fields[cur_index+2], row_num, X);
hipMemcpy(dX, X, sizeof(double) * row_num, hipMemcpyHostToDevice);
// Compute the partial gradient
if(strcmp("lr", lm) == 0)
hipLaunchKernelGGL(( G_lrkl), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dY, dH, dX, dmul, row_num);
else if(strcmp("lsr", lm) == 0)
hipLaunchKernelGGL(( G_lsrkl), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dY, dH, dX, dmul, row_num);
else if(strcmp("lsvm", lm) == 0)
hipLaunchKernelGGL(( G_svmkl), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dY, dH, dX, dmul, row_num);
hipDeviceSynchronize();
F_partial = thrust::reduce(thrust::device, wrapped_ptr, wrapped_ptr + row_num);
// hipMemcpy(mul, dmul, sizeof(double)*row_num, hipMemcpyDeviceToHost);
// #pragma omp parallel for reduction (+:F_partial)
// for(long k = 0; k < row_num; k++)
// F_partial += mul[k];
}
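// Coordinate update: step along the averaged partial gradient, then fold the change into the residual H on the device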
// Store the old W(j)
double W_j = model[cur_index];
// Update the current coordinate
model[cur_index] = model[cur_index] - step_size * (F_partial/row_num);
std::cout << "model[" << cur_index << "]: " << model[cur_index] << std::endl;
double diff = model[cur_index] - W_j;
// Update the intermediate variable
// H = H + (Wj - old_Wj)* X(,j)
if( cur_index < avail_cache)
{
hipLaunchKernelGGL(( H_cache), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dH, cuda_cache, diff, cur_index, pitch, row_num);
hipDeviceSynchronize();
}
else
{
hipLaunchKernelGGL(( Hkl), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dH, dX, diff, row_num);
hipDeviceSynchronize();
}
}
r_prev = F;
// Calculate F
F = 0.00;
// hipMemsetAsync(dF,0,sizeof(double));
if(strcmp("lr", lm) == 0)
hipLaunchKernelGGL(( G_lrloss), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dY, dH, dmul, row_num);
else if(strcmp("lsr", lm) == 0)
hipLaunchKernelGGL(( G_lsrloss), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dY, dH, dmul, row_num);
else if(strcmp("lsvm", lm) == 0)
hipLaunchKernelGGL(( G_svmloss), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, dY, dH, dmul, row_num);
hipDeviceSynchronize();
F = thrust::reduce(thrust::device,wrapped_ptr, wrapped_ptr + row_num);
// hipMemcpy(mul, dmul, sizeof(double)*row_num, hipMemcpyDeviceToHost);
// #pragma omp parallel for reduction (+:F)
// for(long k = 0; k < row_num; k++)
// {
// F += mul[k];
// }
// hipcub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, dmul, dF, row_num);
// hipMemcpy(&F, dF, sizeof(double),hipMemcpyDeviceToHost);
F = F/(double)row_num;
cout<<"loss: " <<F<<endl;
r_curr = F;
iters ++;
c_end = clock();
cout<<"Iteration "<<iters-1<<" :"<<1000*(c_end-c_start)/CLOCKS_PER_SEC<<"ms\n";
}
while(!stop(iters , r_prev, r_curr, _setting));
free(Y);
free(H);
hipFree(dY);
hipFree(dH);
hipFree(dmul);
if( avail_cache < feature_num ){
free(X);
hipFree(dX);
}
// Clear the cache
if( avail_cache > 0) {
free(cache);
hipFree(cuda_cache);
}
printf("\n");
outputResults(r_curr, feature_num, iters, model);
DM.message("Finish materialize");
}
/* oid-oid mapping is Not stored in memory */
void techniques::stream(string table_S, string table_R, setting _setting, double *&model, double avail_mem, const char *lm)
{
DataManagement DM;
DM.message("Start stream");
// Set Timer
clock_t c_start;
clock_t c_end;
c_start = clock();
// Get the table information and column names
vector<long> tableInfo_S(3);
vector<long> tableInfo_R(3);
vector<string> fields_S = DM.getFieldNames(table_S, tableInfo_S);
vector<string> fields_R = DM.getFieldNames(table_R, tableInfo_R);
int feature_num_S = (int)tableInfo_S[1];
int feature_num_R = (int)tableInfo_R[1];
int feature_num = feature_num_S + feature_num_R;
long row_num_S = tableInfo_S[2];
long row_num_R = tableInfo_R[2];
// For Cache
long avail_mem_total = 1024*1024*1024*avail_mem;
long avail_cache;
int avail_col_S = 0;
int avail_col_R = 0;
double **cache_R;
double **cache_S;
// Label array
double *Y;
// Residual vector
double *H;
// Buffer for column reading in S
double *X_S;
// Buffer for column reading in R
double *X_R;
// OID-OID Mapping (Key Foreign-Key Mapping Reference)
double *KKMR;
// Setting
double step_size = _setting.step_size;
// Calculate the available memory measured by size of each column in R and S
avail_cache = avail_mem_total - sizeof(double)*(4*row_num_S + row_num_R);
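// The fixed budget covers Y, H, X_S and KKMR (four columns of length row_num_S) plus the X_R read buffer (row_num_R)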
if(avail_cache < 0)
{
DM.errorMessage("Insufficient memory space");
exit(1);
}
else if(avail_cache == 0)
{
DM.message("No space for caching");
}
else
{
// First consider caching columns in S
avail_col_S = avail_cache/(sizeof(double)*row_num_S);
if(avail_col_S == 0)
{
DM.message("No space for caching S");
// Then consider caching columns in R
avail_col_R = avail_cache/(sizeof(double)*row_num_R);
if(avail_col_R == 0)
{
DM.message("No space for caching R");
}
else
{
if(avail_col_R >= feature_num_R - 1)
{
cache_R = new double*[feature_num_R];
for(int i = 0; i < feature_num_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
// No need to reserve the X_R buffer to read a single column in R
avail_col_R = feature_num_R;
}
else
{
cache_R = new double*[avail_col_R];
for(int i = 0; i < avail_col_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
}
}
}
else
{
if(avail_col_S >= feature_num_S)
{
cache_S = new double*[feature_num_S];
for(int i = 0; i < feature_num_S; i ++)
{
cache_S[i] = new double[row_num_S];
}
// X_S is still needed to "reconstruct" the complete column from single column fetched from R
avail_col_S = feature_num_S;
}
else
{
cache_S = new double*[avail_col_S];
for(int i = 0; i < avail_col_S; i ++)
{
cache_S[i] = new double[row_num_S];
}
}
// Then consider the caching for R using the remaining caching space
avail_cache = avail_cache - avail_col_S*sizeof(double)*row_num_S;
avail_col_R = avail_cache/(sizeof(double)*row_num_R);
if(avail_col_R == 0)
{
DM.message("No space for caching R");
}
else
{
if(avail_col_R >= feature_num_R - 1)
{
cache_R = new double*[feature_num_R];
for(int i = 0; i < feature_num_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
// No need to reserve the X_R buffer to read a single column in R
avail_col_R = feature_num_R;
}
else
{
cache_R = new double*[avail_col_R];
for(int i = 0; i < avail_col_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
}
}
}
}
// Dynamic memory allocation
if(avail_col_R < feature_num_R)
{
X_R = new double[row_num_R];
}
X_S = new double[row_num_S];
Y = new double[row_num_S];
H = new double[row_num_S];
KKMR = new double[row_num_S];
model = new double[feature_num];
// Initialization of variables for loss and gradient
double F = 0.00;
double F_partial = 0.00;
double r_curr = 0.00;
double r_prev = 0.00;
int iters = 0;
// Initialization
memset(model, 0, sizeof(double)*feature_num);
memset(H, 0, sizeof(double)*row_num_S);
DM.fetchColumn(fields_S[1], row_num_S, Y);
// Shuffling process
vector<int> original_index_set;
vector<int> shuffling_index;
// Initialize the original_index_set
for(int i = 0; i < feature_num; i ++)
{
original_index_set.push_back(i);
}
shuffling_index = shuffle(original_index_set, (unsigned)time(NULL));
// Caching S
printf("\n");
printf("Avail_col_S: %d\n", avail_col_S);
for(int i = 0; i < avail_col_S; i ++)
{
//printf("Cache %d th column in S\n", i);
DM.fetchColumn(fields_S[3+i], row_num_S, cache_S[i]);
}
// Caching R
printf("\n");
printf("Avail_col_R: %d\n", avail_col_R);
for(int k = 0; k < avail_col_R; k ++)
{
//printf("Cache %d th column in R\n", k);
DM.fetchColumn(fields_R[1+k],row_num_R, cache_R[k]);
}
c_end = clock();
cout<<"Caching:"<<1000*(c_end-c_start)/CLOCKS_PER_SEC<<"ms\n";
do
{
c_start = clock();
printf("\n");
DM.message("Start fetching KKMR reference");
// Read the fk column(referred rid in R) in table S, rid column in R
ifstream fk;
// Load the fk to KKMR
fk.open(fields_S[2], ios::in | ios::binary);
// rid.open(table2_fields[0], ios::in | ios::binary);
if(!fk.is_open())
{
DM.errorMessage("Error Message: Cannot load the fk column.");
exit(1);
}
fk.read((char *)KKMR, row_num_S*(sizeof(double)));
fk.close();
DM.message("Finish fetchig KKMR reference");
// Update one coordinate each time
for(int j = 0; j < feature_num; j ++)
{
int cur_index = shuffling_index.at(j);
//printf("Current feature index: %d\n", cur_index);
F_partial = 0.00;
if(cur_index < feature_num_S)
{
// Check cache for S
if(cur_index < avail_col_S)
{
// Compute the partial gradient
for(long i = 0; i < row_num_S; i ++)
{
F_partial += gradientCompute(Y[i],H[i],lm)*cache_S[cur_index][i];
}
}
else
{
// Fetch the corresponding column in S and store in X_S
DM.fetchColumn(fields_S[3+cur_index], row_num_S, X_S);
// Compute the partial gradient
for(long i = 0; i < row_num_S; i ++)
{
F_partial += gradientCompute(Y[i],H[i], lm)*X_S[i];
}
}
}
else
{
// Check cache for R
int col_index_R = cur_index - feature_num_S;
//printf("col_index_R: %d\n", col_index_R);
if(col_index_R < avail_col_R)
{
for(long m = 0; m < row_num_S; m ++)
{
long fk = KKMR[m];
X_S[m]= cache_R[col_index_R][fk-1];
}
}
else
{
DM.fetchColumn(fields_R[1+col_index_R], row_num_R, X_R);
for(long m = 0; m < row_num_S; m ++)
{
long fk = KKMR[m];
X_S[m]= X_R[fk-1];
}
}
// Compute the partial gradient
for(long i = 0; i < row_num_S; i ++)
{
F_partial += gradientCompute(Y[i],H[i],lm)*X_S[i];
}
}
// Store the old W(j)
double W_j = model[cur_index];
// Update the current coordinate
model[cur_index] = model[cur_index] - step_size * F_partial;
double diff = model[cur_index] - W_j;
//Update the intermediate variable
//H = H + (Wj - old_Wj)* X(,j)
if( cur_index < avail_col_S)
{
for(long m = 0; m < row_num_S; m ++ )
{
H[m] = H[m] + diff*cache_S[cur_index][m];
}
}
else
{
for(long m = 0; m < row_num_S; m ++ )
{
H[m] = H[m] + diff*X_S[m];
}
}
}
r_prev = F;
// Calculate F
F = 0.00;
for(long i = 0; i < row_num_S; i ++)
{
double tmp = lossCompute(Y[i],H[i],lm);
F += tmp;
}
r_curr = F;
iters ++;
c_end = clock();
cout<<"Iteration:"<<1000*(c_end-c_start)/CLOCKS_PER_SEC<<"ms\n";
}
while(!stop(iters , r_prev, r_curr, _setting));
delete [] Y;
delete [] H;
delete [] X_S;
delete [] KKMR;
if(avail_col_R < feature_num_R)
{
delete [] X_R;
}
// Clear the cache
if(avail_col_S > 0)
{
for(int i = 0; i < avail_col_S; i ++)
{
delete [] cache_S[i];
}
delete [] cache_S;
}
if(avail_col_R > 0)
{
for(int i = 0; i < avail_col_R; i ++)
{
delete [] cache_R[i];
}
delete [] cache_R;
}
printf("\n");
outputResults(r_curr, feature_num, iters, model);
DM.message("Finish stream");
}
void techniques::factorize(string table_S, string table_R, setting _setting, double *&model, double avail_mem, const char *lm)
{
DataManagement DM;
DM.message("Start factorize");
// Set Timer
clock_t c_start;
clock_t c_end;
c_start = clock();
// Get the table information and column names
vector<long> tableInfo_S(3);
vector<long> tableInfo_R(3);
vector<string> fields_S = DM.getFieldNames(table_S, tableInfo_S);
vector<string> fields_R = DM.getFieldNames(table_R, tableInfo_R);
int feature_num_S = (int)tableInfo_S[1];
int feature_num_R = (int)tableInfo_R[1];
int feature_num = feature_num_S + feature_num_R;
long row_num_S = tableInfo_S[2];
long row_num_R = tableInfo_R[2];
// For Cache
long avail_mem_total = 1024*1024*1024*avail_mem;
long avail_cache;
int avail_col_S = 0;
int avail_col_R = 0;
double **cache_R;
double **cache_S;
// Label array
double *Y;
// Residual vector
double *H;
// Buffer for column reading in S
double *X_S;
// Buffer for column reading in R
double *X_R;
// Buffer to store factorized factor when considering column R
double *X_R_f;
// OID-OID Mapping (Key Foreign-Key Mapping Reference, to be kept in memory)
double *KKMR;
// Setting
double step_size = _setting.step_size;
// Calculate the available memory measured by size of each column in R and S
avail_cache = avail_mem_total - sizeof(double)*(4*row_num_S + 2*row_num_R);
if(avail_cache < 0)
{
DM.errorMessage("Insufficient memory space");
exit(1);
}
else if(avail_cache == 0)
{
DM.message("No space for caching");
}
else
{
// First consider caching columns in S
avail_col_S = avail_cache/(sizeof(double)*row_num_S);
if(avail_col_S == 0)
{
DM.message("No space for caching S");
// Then consider caching columns in R
avail_col_R = avail_cache/(sizeof(double)*row_num_R);
if(avail_col_R == 0)
{
DM.message("No space for caching R");
}
else
{
if(avail_col_R >= feature_num_R - 1)
{
cache_R = new double*[feature_num_R];
for(int i = 0; i < feature_num_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
// No need to reserve the X_R buffer to read a single column in R
avail_col_R = feature_num_R;
}
else
{
cache_R = new double*[avail_col_R];
for(int i = 0; i < avail_col_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
}
}
}
else
{
if(avail_col_S >= (feature_num_S - 1))
{
cache_S = new double*[feature_num_S];
for(int i = 0; i < feature_num_S; i ++)
{
cache_S[i] = new double[row_num_S];
}
//No need to reserve X_S for single column reading
avail_col_S = feature_num_S;
}
else
{
cache_S = new double*[avail_col_S];
for(int i = 0; i < avail_col_S; i ++)
{
cache_S[i] = new double[row_num_S];
}
}
//Then consider the caching for R using the remaining caching space
if(avail_col_S == feature_num_S)
{
avail_cache = avail_cache - (avail_col_S - 1)*sizeof(double)*row_num_S;
}
else
{
avail_cache = avail_cache - avail_col_S*sizeof(double)*row_num_S;
}
avail_col_R = avail_cache/(sizeof(double)*row_num_R);
if(avail_col_R == 0)
{
DM.message("No space for caching R");
}
else
{
if(avail_col_R >= feature_num_R - 1)
{
cache_R = new double*[feature_num_R];
for(int i = 0; i < feature_num_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
// No need to reserve the X_R buffer to read a single column in R
avail_col_R = feature_num_R;
}
else
{
cache_R = new double*[avail_col_R];
for(int i = 0; i < avail_col_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
}
}
}
}
// Dynamic memory allocation
if(avail_col_S < feature_num_S)
{
X_S = new double[row_num_S];
}
if(avail_col_R < feature_num_R)
{
X_R = new double[row_num_R];
}
model = new double[feature_num];
Y = new double[row_num_S];
H = new double[row_num_S];
X_R_f = new double[row_num_R];
KKMR = new double[row_num_S];
// Initialization of variables for loss and gradient
double F = 0.00;
double F_partial = 0.00;
double r_curr = 0.00;
double r_prev = 0.00;
int iters = 0;
// Initialization
memset(model, 0, sizeof(double)*feature_num);
memset(H, 0, sizeof(double)*row_num_S);
DM.fetchColumn(fields_S[1], row_num_S, Y);
printf("\n");
DM.message("Start fetching KKMR reference");
// Read the fk column(referred rid in R) in table S, rid column in R
ifstream fk;
// Load the fk to KKMR
fk.open(fields_S[2], ios::in | ios::binary);
// rid.open(table2_fields[0], ios::in | ios::binary);
if(!fk.is_open())
{
DM.errorMessage("Error Message: Cannot load the fk column.");
exit(1);
}
fk.read((char *)KKMR, row_num_S*(sizeof(double)));
fk.close();
DM.message("Finished fetching KKMR reference");
vector<int> original_index_set;
vector<int> shuffling_index;
// Initialize the original_index_set
for(int i = 0; i < feature_num; i ++)
{
original_index_set.push_back(i);
}
// Shuffling
shuffling_index = shuffle(original_index_set, (unsigned)time(NULL));
// Caching S
printf("\n");
printf("Avail_col_S: %d\n", avail_col_S);
for(int i = 0; i < avail_col_S; i ++)
{
// printf("Cache %d th column in S\n", i);
DM.fetchColumn(fields_S[3+i], row_num_S, cache_S[i]);
}
// Caching R
printf("\n");
printf("Avail_col_R: %d\n", avail_col_R);
for(int k = 0; k < avail_col_R; k ++)
{
//printf("Cache %d th column in R\n", k);
DM.fetchColumn(fields_R[1+k],row_num_R, cache_R[k]);
}
c_end = clock();
cout<<"Caching:"<<1000*(c_end-c_start)/CLOCKS_PER_SEC<<"ms\n";
do
{
c_start = clock();
// Update one coordinate each time
for(int j = 0; j < feature_num; j ++)
{
int cur_index = shuffling_index.at(j);
//printf("Current feature index: %d\n", cur_index);
F_partial = 0.00;
if(cur_index < feature_num_S)
{
// Check cache for S
if(cur_index < avail_col_S)
{
// Compute the partial gradient
for(long i = 0; i < row_num_S; i ++)
{
F_partial += gradientCompute(Y[i],H[i],lm)*cache_S[cur_index][i];
}
}
else
{
// Fetch the corresponding column in S and store in X_S
DM.fetchColumn(fields_S[3+cur_index], row_num_S, X_S);
// Compute the partial gradient
for(long i = 0; i < row_num_S; i ++)
{
F_partial += gradientCompute(Y[i],H[i],lm)*X_S[i];
}
}
// Store the old W(j)
double W_j = model[cur_index];
// Update the current coordinate
model[cur_index] = model[cur_index] - step_size * F_partial;
double diff = model[cur_index] - W_j;
// Update the intermediate variable
// H = H + (Wj - old_Wj)* X(,j)
if(cur_index < avail_col_S)
{
for(long m = 0; m < row_num_S; m ++ )
{
H[m] = H[m] + diff*cache_S[cur_index][m];
}
}
else{
for(long m = 0; m < row_num_S; m ++ )
{
H[m] = H[m] + diff*X_S[m];
}
}
}
else
{
memset(X_R_f, 0, sizeof(double)*row_num_R);
// Check cache for R
int col_index_R = cur_index - feature_num_S;
// Compute the factorized factor
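// Aggregate the per-row gradients into X_R_f grouped by the referenced R tuple; the dot product with the (shorter) R column below gives the partial gradient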
for(long m = 0; m < row_num_S; m ++)
{
long fk = KKMR[m];
X_R_f[fk-1] += gradientCompute(Y[m],H[m],lm);
}
if(col_index_R < avail_col_R)
{
// Compute the partial gradient
for(long k = 0; k < row_num_R; k ++)
{
F_partial += cache_R[col_index_R][k]*X_R_f[k];
}
}
else
{
DM.fetchColumn(fields_R[1+col_index_R], row_num_R, X_R);
for(long k = 0; k < row_num_R; k ++)
{
F_partial += X_R[k]*X_R_f[k];
}
}
// Store the old W(j)
double W_j = model[cur_index];
// Update the current coordinate
model[cur_index] = model[cur_index] - step_size * F_partial;
double diff = model[cur_index] - W_j;
// Factorized computation
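// Scale the R column by diff once; the scatter into H below then reuses X_R_f through the KKMR mapping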
if(col_index_R < avail_col_R)
{
for(long k = 0; k < row_num_R; k ++)
{
X_R_f[k] = diff*cache_R[col_index_R][k];
}
}
else
{
for(long k = 0; k < row_num_R; k ++)
{
X_R_f[k] = diff*X_R[k];
}
}
// Update the intermediate variable
// H = H + (Wj - old_Wj)* X(,j)
for(long m = 0; m < row_num_S; m ++ )
{
long fk = KKMR[m];
H[m] = H[m] + X_R_f[fk-1];
}
}
}
r_prev = F;
// Calculate F
F = 0.00;
for(long i = 0; i < row_num_S; i ++)
{
double tmp = lossCompute(Y[i],H[i],lm);
F += tmp;
}
r_curr = F;
iters ++;
c_end = clock();
cout<<"Iteration:"<<1000*(c_end-c_start)/CLOCKS_PER_SEC<<" ms\n";
}
while(!stop(iters, r_prev, r_curr, _setting));
delete [] Y;
delete [] H;
delete [] X_R_f;
delete [] KKMR;
if(avail_col_S < feature_num_S)
{
delete [] X_S;
}
if(avail_col_R < feature_num_R)
{
delete [] X_R;
}
// Clear the cache
if(avail_col_S > 0)
{
for(int i = 0; i < avail_col_S; i ++)
{
delete [] cache_S[i];
}
delete [] cache_S;
}
if(avail_col_R > 0)
{
for(int i = 0; i < avail_col_R; i ++)
{
delete [] cache_R[i];
}
delete [] cache_R;
}
printf("\n");
outputResults(r_curr, feature_num, iters, model);
DM.message("Finish factorize");
}
#pragma mark - Block Coordinate Descent
void techniques::materializeBCD(string table_T, setting _setting, double *&model, int block_size, double avail_mem, const char *lm)
{
DataManagement DM;
DM.message("Start materializeBCD");
// Get the table information and column names
vector<long> tableInfo(3);
vector<string> fields = DM.getFieldNames(table_T, tableInfo);
int feature_num = (int)tableInfo[1];
long row_num = tableInfo[2];
// Block Info
int block_num = feature_num/block_size;
int block_residual = feature_num%block_size;
block_num = block_residual > 0 ? (block_num + 1) : block_num;
// For cache
long avail_mem_total = 1024*1024*1024*avail_mem;
int avail_col = 0;
int avail_cache = 0;
double **cache;
// Label Array
double *Y;
// Residual Vector
double *H;
// Buffer for column reading
double *X;
// Additional columns space reserved for gradient computation
double *G;
double *difference;
// Setting
double step_size = _setting.step_size;
// Calculate the available memory measured by size of each column
avail_col = avail_mem_total/(sizeof(double)*row_num);
// Calculate the available remaining space for cache
avail_cache = avail_col - 5;
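// Five columns are reserved for Y, H, X, G and difference; the remainder can cache feature columns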
if(avail_cache < 0)
{
DM.errorMessage("Insufficient memory space");
exit(1);
}
else if (avail_cache == 0)
{
DM.message("No space for caching");
}
else
{
if( avail_cache >= feature_num - 1 )
{
cache = new double*[feature_num];
for(int i = 0; i < feature_num; i ++)
{
cache[i] = new double[row_num];
}
//No need to reserve the X buffer to read single column
avail_cache = feature_num;
}
else
{
cache = new double*[avail_cache];
for(int i = 0; i < avail_cache; i ++)
{
cache[i] = new double[row_num];
}
}
}
// Dynamic memory allocation
if(avail_cache < feature_num)
{
// Allocate the memory to X
X = new double[row_num];
}
Y = new double[row_num];
H = new double[row_num];
G = new double[row_num];
difference = new double[row_num];
model = new double[feature_num];
// Initialization of variables for loss and gradient
double F = 0.00;
double F_partial[block_size];
// Initialize the partial gradient for every block
for(int i = 0; i < block_size; i ++)
{
F_partial[i] = 0.00;
}
double r_curr = 0.00;
double r_prev = 0.00;
int iters = 0;
for(int i = 0; i < feature_num; i ++)
{
model[i] = 0.00;
}
for(long i = 0; i < row_num; i ++)
{
H[i] = 0.00;
G[i] = 0.00;
difference[i] = 0.00;
}
DM.fetchColumn(fields[1], row_num, Y);
// Two level shuffling: first shuffling all columns, then all blocks
vector<int> original_index;
vector<int> shuffling_index;
vector<int> original_block_index;
vector<int> shuffling_block_index;
// Initialize the original_index_set
for(int i = 0; i < feature_num; i ++)
{
original_index.push_back(i);
}
for(int i = 0; i < block_num; i ++)
{
original_block_index.push_back(i);
}
// Shuffling
shuffling_index = shuffle(original_index, (unsigned)time(NULL));
shuffling_block_index = shuffle(original_block_index, (unsigned)time(NULL));
// Print the shuffling_index and shuffling_block_index
/**
printf("After shuffling, the feature indexes:\n");
for(int i = 0; i < feature_num; i ++)
{
printf("[%d]\n",shuffling_index.at(i));
}
printf("After shuffling, the block indexes:\n");
for(int i = 0; i < block_num; i ++)
{
printf("[%d]\n",shuffling_block_index.at(i));
}
**/
// Caching
printf("\n");
printf("Avail_col: %d\n", avail_cache);
for(int i = 0; i < avail_cache; i ++)
{
printf("Cache %d th column\n", i);
DM.fetchColumn(fields[i+2],row_num, cache[i]);
}
do
{
// Update one "block" each time
// "Cumulative" difference in H caused by block
for(int j = 0; j < block_num; j ++)
{
int cur_block_index = shuffling_block_index.at(j);
//printf("Current_block_index: %d\n",cur_block_index);
int cur_block_size = 0;
//Check whether the current block is the "residual"
if( (cur_block_index == block_num - 1) && block_residual > 0 )
{
cur_block_size = block_residual;
}
else
{
cur_block_size = block_size;
}
for(long d = 0; d < row_num; d ++)
{
difference[d] = 0.00;
}
// Start with "first level" block index
int block_start_index= 0;
// Double indexing: here, the index is the "index" of the "real index"
// Update each 'block' by starting with getting the block index
block_start_index = cur_block_index*block_size;
//printf("Block_start_index: %d\n",shuffling_index.at(block_start_index));
// First calculate the statistics used for gradient
for(long g = 0; g < row_num; g ++)
{
G[g] = gradientCompute(Y[g],H[g],lm);
}
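// G holds the per-row gradient scalar shared by every coordinate in this block, so it is computed once per block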
for(int b = 0; b < cur_block_size; b ++)
{
int cur_index = shuffling_index.at(block_start_index+b);
//printf("Current feature index: %d\n", cur_index);
F_partial[b] = 0.00;
// Check for Cache
if(cur_index < avail_cache)
{
// Compute the partial gradient from cache
for(long i = 0; i < row_num ; i ++)
{
F_partial[b] += G[i]*cache[cur_index][i];
}
}
else
{
// Fetch the column and store the current column into X
DM.fetchColumn(fields[cur_index+2], row_num, X);
// Compute the partial gradient
for(long i = 0; i < row_num ; i ++)
{
F_partial[b] += G[i]*X[i];
}
}
// Store the old W(j)
int cur_model_index = cur_index;
double diff = model[cur_model_index];
// Update the current coordinate
model[cur_model_index] = model[cur_model_index] - step_size * F_partial[b];
// Compute the difference on current coordinate
diff = model[cur_model_index] - diff;
// Update the cumulative difference
if(cur_index < avail_cache)
{
for(long m = 0; m < row_num; m ++)
{
difference[m] += diff*cache[cur_index][m];
}
}
else
{
for(long m = 0; m < row_num; m ++)
{
difference[m] += diff*X[m];
}
}
}
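// Apply the block's accumulated change to the residual in a single pass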
for(long m = 0; m < row_num; m ++ )
{
H[m] = H[m] + difference[m];
}
}
r_prev = F;
// Calculate F
F = 0.00;
for(long i = 0; i < row_num ; i ++)
{
double tmp = lossCompute(Y[i],H[i], lm);
F += tmp;
}
r_curr = F;
iters ++;
}
while(!stop(iters, r_prev, r_curr, _setting));
delete [] Y;
delete [] H;
delete [] G;
delete [] difference;
if(avail_cache < feature_num)
{
delete [] X;
}
// Clear the cache
if(avail_cache > 0)
{
for(int i = 0; i < avail_cache; i ++)
{
delete [] cache[i];
}
delete [] cache;
}
printf("\n");
outputResults(r_curr, feature_num, iters, model);
DM.message("Finish materializeBCD");
}
void techniques::factorizeBCD(string table_S, string table_R, setting _setting, double *&model, int block_size, double avail_mem, const char *lm)
{
DataManagement DM;
DM.message("Start factorizeBCD");
// Get the table information and column names
vector<long> tableInfo_S(3);
vector<long> tableInfo_R(3);
vector<string> fields_S = DM.getFieldNames(table_S, tableInfo_S);
vector<string> fields_R = DM.getFieldNames(table_R, tableInfo_R);
int feature_num_S = (int)tableInfo_S[1];
int feature_num_R = (int)tableInfo_R[1];
int feature_num = feature_num_S + feature_num_R;
long row_num_S = tableInfo_S[2];
long row_num_R = tableInfo_R[2];
// Block Info
int block_num = feature_num/block_size;
int block_residual = feature_num%block_size;
block_num = block_residual > 0 ? (block_num + 1) : block_num;
// For Cache
long avail_mem_total = 1024*1024*1024*avail_mem;
long avail_cache = 0;
int avail_col_S = 0;
int avail_col_R = 0;
double **cache_R;
double **cache_S;
// Label array
double *Y;
// Residual vector
double *H;
// Buffer for column reading in S
double *X_S;
// Buffer for column reading in R
double *X_R;
// Buffer to store factorized factor when considering column R
double *X_R_f;
// OID-OID Mapping (Key Foreign-Key Mapping Reference, to be kept in memory)
double *KKMR;
// Additional column space reserved for gradient computation
double *G;
double *difference;
// Setting
double step_size = _setting.step_size;
// Calculate the available memory measured by size of each column in R and S
avail_cache = avail_mem_total - sizeof(double)*(6*row_num_S + 2*row_num_R);
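// The fixed budget covers Y, H, X_S, KKMR, G and difference (six columns of length row_num_S) plus X_R and X_R_f (two columns of length row_num_R)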
if(avail_cache < 0)
{
DM.errorMessage("Insufficient memory space");
exit(1);
}
else if(avail_cache == 0)
{
DM.message("No space for caching");
}
else
{
// First consider caching columns in S
avail_col_S = avail_cache/(sizeof(double)*row_num_S);
if(avail_col_S == 0)
{
DM.message("No space for caching S");
// Then consider caching columns in R
avail_col_R = avail_cache/(sizeof(double)*row_num_R);
if(avail_col_R == 0)
{
DM.message("No space for caching R");
}
else
{
if(avail_col_R >= feature_num_R - 1)
{
cache_R = new double*[feature_num_R];
for(int i = 0; i < feature_num_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
// No need to reserve the X_R buffer to read a single column in R
avail_col_R = feature_num_R;
}
else
{
cache_R = new double*[avail_col_R];
for(int i = 0; i < avail_col_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
}
}
}
else
{
if(avail_col_S >= feature_num_S)
{
cache_S = new double*[feature_num_S];
for(int i = 0; i < feature_num_S; i ++)
{
cache_S[i] = new double[row_num_S];
}
// No need to reserve X_S for single column reading
avail_col_S = feature_num_S;
}
else
{
cache_S = new double*[avail_col_S];
for(int i = 0; i < avail_col_S; i ++)
{
cache_S[i] = new double[row_num_S];
}
}
// Then consider the caching for R using the remaining caching space
if(avail_col_S == feature_num_S)
{
avail_cache = avail_cache - (avail_col_S - 1)*sizeof(double)*row_num_S;
}
else
{
avail_cache = avail_cache - avail_col_S*sizeof(double)*row_num_S;
}
avail_col_R = avail_cache/(sizeof(double)*row_num_R);
if(avail_col_R == 0)
{
DM.message("No space for caching R");
}
else
{
if(avail_col_R >= feature_num_R - 1)
{
cache_R = new double*[feature_num_R];
for(int i = 0; i < feature_num_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
//No need to reserve the X_R buffer to read a single column in R
avail_col_R = feature_num_R;
}
else
{
cache_R = new double*[avail_col_R];
for(int i = 0; i < avail_col_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
}
}
}
}
// Dynamic memory allocation
if(avail_col_S < feature_num_S)
{
X_S = new double[row_num_S];
}
if(avail_col_R < feature_num_R)
{
X_R = new double[row_num_R];
}
Y = new double[row_num_S];
H = new double[row_num_S];
X_R_f = new double[row_num_R];
G = new double[row_num_S];
difference = new double[row_num_S];
KKMR = new double[row_num_S];
model = new double[feature_num];
// Initialization of variables for loss and gradient
double F = 0.00;
double F_partial[block_size];
double r_curr = 0.00;
double r_prev = 0.00;
int iters = 0;
// Initialize the partial gradient for every block
for(int i = 0; i < block_size; i ++)
{
F_partial[i] = 0.00;
}
// Initialization
for(int i = 0; i < feature_num; i ++)
{
model[i] = 0.00;
}
for(long i = 0; i < row_num_S; i ++)
{
H[i] = 0.00;
G[i] = 0.00;
difference[i] = 0.00;
}
for(long i = 0; i < row_num_R; i ++)
{
X_R_f[i] = 0.00;
}
DM.fetchColumn(fields_S[1], row_num_S, Y);
printf("\n");
DM.message("Start fetching KKMR reference");
// Read the fk column(referred rid in R) in table S, rid column in R
ifstream fk;
// Load the fk to KKMR
fk.open(fields_S[2], ios::in | ios::binary);
// rid.open(table2_fields[0], ios::in | ios::binary);
if(!fk.is_open())
{
DM.errorMessage("Error Message: Cannot load the fk column.");
exit(1);
}
fk.read((char *)KKMR, row_num_S*(sizeof(double)));
fk.close();
DM.message("Finished fetching KKMR reference");
//Two level shuffling: first shuffling all columns, then all blocks
vector<int> original_index;
vector<int> shuffling_index;
vector<int> original_block_index;
vector<int> shuffling_block_index;
// Initialize the original_index_set
for(int i = 0; i < feature_num; i ++)
{
original_index.push_back(i);
}
for(int i = 0; i < block_num; i ++)
{
original_block_index.push_back(i);
}
// Shuffling
shuffling_index = shuffle(original_index, (unsigned)time(NULL));
shuffling_block_index = shuffle(original_block_index, (unsigned)time(NULL));
// Print the shuffling_index and shuffling_block_index
/**
printf("After shuffling, the feature indexes:\n");
for(int i = 0; i < feature_num; i ++)
{
printf("[%d]\n",shuffling_index.at(i));
}
//printf("After shuffling, the block indexes:\n");
for(int i = 0; i < block_num; i ++)
{
printf("[%d]\n",shuffling_block_index.at(i));
}
**/
// Caching S
printf("\n");
printf("Avail_col_S: %d\n", avail_col_S);
for(int i = 0; i < avail_col_S; i ++)
{
printf("Cache %d th column in S\n", i);
DM.fetchColumn(fields_S[3+i], row_num_S, cache_S[i]);
}
// Caching R
printf("\n");
printf("Avail_col_R: %d\n", avail_col_R);
for(int k = 0; k < avail_col_R; k ++)
{
printf("Cache %d th column in R\n", k);
DM.fetchColumn(fields_R[1+k],row_num_R, cache_R[k]);
}
do
{
// Update one "block" each time
// "Cumulative" difference in H caused by block
for(int j = 0; j < block_num; j ++)
{
int cur_block_index = shuffling_block_index.at(j);
//printf("Current_block_index: %d\n",cur_block_index);
int cur_block_size = 0;
//Check whether the current block is the "residual"
if( (cur_block_index == block_num - 1) && block_residual > 0 )
{
cur_block_size = block_residual;
}
else
{
cur_block_size = block_size;
}
for(long d = 0; d < row_num_S; d ++)
{
difference[d] = 0.00;
}
// Start with "first level" block index
int block_start_index= 0;
// Double indexing: here, the index is the "index" of the "real index"
// Update each 'block' by starting with getting the block index
block_start_index = cur_block_index*block_size;
//printf("Block_start_index: %d\n", shuffling_index.at(block_start_index));
// First calculate the statistics used for gradient
for(long g = 0; g < row_num_S; g ++)
{
G[g] = gradientCompute(Y[g],H[g],lm);
}
for(int b = 0; b < cur_block_size; b ++)
{
int cur_index = shuffling_index.at(block_start_index + b);
//printf("Current feature index: %d\n", cur_index);;
F_partial[b] = 0.00;
// Check whether the column is in table R. If it is, applied factorized learning
if(cur_index < feature_num_S)
{
// Check cache for S
if(cur_index < avail_col_S)
{
// Compute the partial gradient
for(long i = 0; i < row_num_S; i ++)
{
F_partial[b] += G[i]*cache_S[cur_index][i];
}
}
else
{
// Fetch each column and store the column into X
DM.fetchColumn(fields_S[cur_index+3], row_num_S, X_S);
// Compute the partial gradient
for(long i = 0; i < row_num_S; i ++)
{
F_partial[b] += G[i]*X_S[i];
}
}
// Store the old Wj
int cur_model_index = cur_index;
double W_j = model[cur_model_index];
// Update the current coordinate
model[cur_model_index] = model[cur_model_index] - step_size * F_partial[b];
// Compute the difference
double diff = model[cur_model_index] - W_j;
// Update the cumulative difference
if(cur_index < avail_col_S)
{
for(long m = 0; m < row_num_S; m ++)
{
difference[m] += diff*cache_S[cur_index][m];
}
}
else
{
for(long m = 0; m < row_num_S; m ++)
{
difference[m] += diff*X_S[m];
}
}
}
else
{
for(long i = 0; i < row_num_R; i ++)
{
X_R_f[i] = 0.00;
}
// Check cache for R
int col_index_R = cur_index - feature_num_S;
//printf("col_index_R: %d\n",col_index_R);
// Apply factorized learning to gradient computation
for(long m = 0; m < row_num_S; m ++)
{
long fk = KKMR[m];
X_R_f[fk-1] += G[m];
}
if(col_index_R < avail_col_R)
{
for(long j = 0; j < row_num_R; j ++)
{
F_partial[b] += cache_R[col_index_R][j]*X_R_f[j];
}
}
else
{
// Fetch the corresponding column in R
DM.fetchColumn(fields_R[1+col_index_R],row_num_R, X_R);
for(long j = 0; j < row_num_R; j ++)
{
F_partial[b] += X_R[j]*X_R_f[j];
}
}
int cur_model_index = cur_index;
double W_j = model[cur_model_index];
model[cur_model_index] = model[cur_model_index] - step_size * F_partial[b];
double diff = model[cur_model_index] - W_j;
// Apply factorized learning to difference (of model/coordinate) computation
if(col_index_R < avail_col_R)
{
for(int i = 0; i < row_num_R; i ++ )
{
X_R_f[i] = diff*cache_R[col_index_R][i];
}
}
else
{
for(int i = 0; i < row_num_R; i ++ )
{
X_R_f[i] = diff*X_R[i];
}
}
for(long m = 0; m < row_num_S; m ++)
{
long fk = KKMR[m];
difference[m] += X_R_f[fk-1];
}
}
}
for(long m = 0; m < row_num_S; m ++)
{
H[m] = H[m] + difference[m];
}
}
r_prev = F;
// Calculate F
F = 0.00;
for(long i = 0; i < row_num_S; i ++)
{
double tmp = lossCompute(Y[i],H[i],lm);
F += tmp;
}
r_curr = F;
iters ++;
}
while(!stop(iters, r_prev, r_curr, _setting));
delete [] Y;
delete [] H;
delete [] X_R_f;
delete [] KKMR;
delete [] G;
delete [] difference;
if(avail_col_S < feature_num_S)
{
delete [] X_S;
}
if(avail_col_R < feature_num_R)
{
delete [] X_R;
}
// Clear Cache
if(avail_col_R > 0)
{
for(int i = 0; i < avail_col_R; i ++)
{
delete [] cache_R[i];
}
delete [] cache_R;
}
if(avail_col_S > 0)
{
for(int i = 0; i < avail_col_S; i ++)
{
delete [] cache_S[i];
}
delete [] cache_S;
}
printf("\n");
outputResults(r_curr, feature_num, iters, model);
DM.message("Finish factorizeBCD");
}
#pragma mark - Gradient descent
/*
Read a single file whose columns are in the format: id, label, features
The intercept term W0 is not considered for now
Logistic regression only for now
*/
// Specific technique selection: flag (for generalization purposes)
// Stochastic Gradient Descent
void techniques::SGD(vector< vector<double> > data, setting _setting, double *&model, int feature_num)
{
DataManagement::message("Start SGD");
long data_size = data.size();
vector<long> original_index_set;
vector<long> shuffling_index;
//Initialize the original_index_set
std::cout << "Start building the index set" << std::endl;
for(long i = 0; i < data_size; i ++)
{
original_index_set.push_back(i);
}
// Shuffling
shuffling_index = shuffle(original_index_set, (unsigned)time(NULL));
// Setting
double step_size = _setting.step_size;
// Allocate the memory to model
model = new double[feature_num];
for(int i = 0; i < feature_num; i ++)
{
model[i] = 0.00;
}
// Loss Function
double F = 0.00;
double r_curr = 0.00;
double r_prev = 0.00;
int iters = 0;
std::cout << "Start training" << std::endl;
do
{
r_prev = F;
F = 0.00;
vector<double> gradient(feature_num,0.00);
for(long j = 0; j < data_size; j ++)
{
long cur_index = shuffling_index[j];
// Update the model
double output = 0.00;
for(int k = 0; k < feature_num; k ++)
{
output += model[k]*data[cur_index][k+2];
}
for(int k = 0; k < feature_num; k ++)
{
gradient[k] = gradientCompute(data[cur_index][1],output, "lr")*data[cur_index][2+k];
model[k] = model[k]-step_size*gradient[k];
}
}
// Calculate F
for(long j = 0; j < data_size; j ++)
{
double output = 0.00;
for(int k = 0; k < feature_num; k ++)
{
output += model[k]*data[j][k+2];
}
double tmp = lossCompute(data[j][1], output, "lr");
F += tmp;
}
r_curr = F;
std::cout << "Loss: " << F << std::endl;
iters ++;
}
while(!stop(iters ,r_prev,r_curr,_setting));
printf("\n");
outputResults(r_curr, feature_num, iters, model);
DataManagement::message("Finish SGD");
}
#pragma mark - Batch Gradient Descent
// Batch Gradient Descent
void techniques::BGD(vector< vector<double> > data, setting _setting, double *&model, int feature_num)
{
DataManagement::message("Start BGD");
long data_size = data.size();
// Setting
double step_size = _setting.step_size;
// Allocate the memory to the model
model = new double[feature_num];
for(int i = 0; i < feature_num; i ++)
{
model[i] = 0.00;
}
// Loss Function
double F = 0.00;
double r_curr = 0.00;
double r_prev = 0.00;
int iters = 0;
do
{
r_prev = F;
F = 0.00;
vector<double> gradient(feature_num,0.00);
for(long j = 0; j < data_size; j ++)
{
// Update the model
double output = 0.00;
for(int k = 0; k < feature_num; k ++)
{
output += model[k]*data[j][2+k];
}
for(int k = 0; k < feature_num; k ++)
{
gradient[k] += gradientCompute(data[j][1],output, "lm")*data[j][2+k];
}
}
for(int k = 0; k < feature_num; k ++)
{
model[k] = model[k]-step_size*gradient[k];
}
for(long j = 0; j < data_size; j ++)
{
double output = 0.00;
for(int k = 0; k < feature_num; k ++)
{
output += model[k]*data[j][2+k];
}
double tmp = lossCompute(data[j][1], output, "lm");
printf("tmp loss: %f\n", tmp);
F += tmp;
}
r_curr = F;
printf("The loss: %lf\n",F);
iters ++;
}
while(!stop(iters ,r_prev,r_curr,_setting));
printf("\n");
outputResults(r_curr, feature_num, iters, model);
DataManagement::message("Finish BGD");
}
void techniques::classify(vector< vector<double> > data, vector<double> model)
{
// Count the number of correct classifications
long count = 0;
long data_size = data.size();
if(data.at(0).size() != model.size()+2)
{
DataManagement::errorMessage("Inconsistent file provided");
}
int featureNum = (int)model.size();
for(long i = 0; i < data_size; i ++)
{
double actual_label = data[i][1];
double predicted_label = 0.00;
double confidence = 0.00;
double output = 0.00;
for(int j = 0; j < featureNum; j ++)
{
output += model[j]*data[i][2+j];
}
printf("W^TX: %f\n", output);
confidence = C_lr(output);
if(confidence > 0.5)
{
predicted_label = 1.00;
}
else
{
predicted_label = -1.00;
}
if(actual_label == predicted_label)
{
printf("Prediction Correct\n");
count++;
}
else
{
printf("Prediction Wrong\n");
}
printf("Confidence: %f\n", confidence);
printf("Actual Label: %f , Predicted Label: %f\n", actual_label, predicted_label);
}
printf("Correcteness: %f \n", (double)count/(double)data_size);
}
#pragma mark - shuffling
vector<int> techniques::shuffle(vector<int> &index_set, unsigned seed)
{
vector<int> original_set = index_set;
int size = (int)index_set.size();
vector<int> new_index_set;
srand (seed);
for(int i = 0; i < size; i ++)
{
int cur_size = (int)original_set.size();
int rand_index = rand()%cur_size;
new_index_set.push_back(original_set.at(rand_index));
original_set.erase(original_set.begin()+rand_index);
}
return new_index_set;
}
vector<long> techniques::shuffle(vector<long> &index_set, unsigned seed)
{
vector<long> original_set = index_set;
long size = (long)index_set.size();
vector<long> new_index_set;
srand(seed);
for(long i = 0; i < size; i ++)
{
long cur_size = original_set.size();
long rand_index = rand()%cur_size;
new_index_set.push_back(original_set.at(rand_index));
original_set.erase(original_set.begin()+rand_index);
}
return new_index_set;
}
#pragma mark - Stopping criteria
bool techniques::stop(int k, double r_prev, double r_curr, setting &setting)
{
double iter_num = k;
double difference = fabs(r_prev - r_curr);
if( iter_num == setting.iter_num || difference <= setting.error)
{
return true;
}
else
{
return false;
}
}
#pragma mark - print the final result
void techniques::outputResults(double r_curr, int feature_num, int k, double *&model)
{
printf("The final loss: %lf\n", r_curr);
printf("Number of iteration: %d\n", k);
printf("Model: ");
for(int i = 0; i < feature_num; i ++)
{
if(i == feature_num - 1)
{
printf("%.20f\n",model[i]);
}
else
{
printf("%.20f, ",model[i]);
}
}
}
| aa786c563ccedb82a9af7a4bc1724d4bc9f7ca60.cu | //
// techniques.cpp
// Coordinate_descent
//
// Created by Huawei on 04/07/18.
// Copyright © 2015 Zhiwei Fan. All rights reserved.
//
#include <cuda.h>
#include <iostream>
#include <fstream>
#include <cmath>
#include <ctime>
#include <stdlib.h>
#include <string.h>
#include "techniques.h"
#include "DataManagement.h"
#include "gradientkl.cu"
#include "linear_models.h"
// #include <cub/cub.cuh>
#include <thrust/reduce.h>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
techniques::techniques(){};
/**
Coordinate Descent/Block Coordinate Descent:
(CD/BCD)
Materialize, Stream, Factorize
Stochastic Gradient Descent/Batch Gradient Descent:
(SGD/BGD)
Materialize only
*/
#pragma mark - Stochastic Coordiante Descent
/**
Stochastic Coordinate Descent - Materialize
@param table_T The output table results from the join of the "entity" table S and the "attribute" table R
@param _setting
@param model
@param avail_mem The available memory measured by "sizeof(double)"
*/
// The choice of computing on the CPU or the GPU should also take the available GPU memory into consideration
// Currently the memory check follows the CPU calculation: if the data does not fit in CPU memory, it certainly cannot be processed on the GPU
void techniques::materialize(string table_T, setting _setting, double *&model, double avail_mem, const char *lm)
{
// Object for reading data into memory
DataManagement DM;
DM.message("Start materialize");
// Set Timer
clock_t c_start;
clock_t c_end;
// Get the table information and column names
vector<long> tableInfo(3);
vector<string> fields = DM.getFieldNames(table_T, tableInfo);
int feature_num = (int)tableInfo[1];
long row_num = (long)tableInfo[2];
// For cache, avail_mem_total in Bytes, avail_mem in GB
double avail_mem_total = 1024*1024*1024*avail_mem;
int avail_col = 0;
int avail_cache = 0;
double *cache;
// Primary main memory space: three columns
// At any time there must be space for the label Y, one feature column X, and the residual H
// Label array
double *Y;
// Residual vector
double *H;
// Buffer for 1 column reading
double *X;
// Setting
double step_size = _setting.step_size;
// Calculate the available memory measured by size of a single column, total available column
avail_col = avail_mem_total/(sizeof(double)*row_num);
// Calculate the available remaining space measured by size of a single column for cache
avail_cache = avail_col - 3;
if(avail_cache < 0)
{
DM.errorMessage("Insufficient memory space for training");
exit(1);
}
else if (avail_cache == 0)
{
DM.message("No space for caching");
}
else
{
// can cache all feature column
if( avail_cache >= feature_num - 1 )
{
// cache = new double*[feature_num];
// for(int i = 0; i < feature_num; i ++)
// {
// cache[i] = new double[row_num];
// }
cache = (double*)malloc(sizeof(double)*feature_num*row_num);
// No need to reserve the X buffer to read single column, all in cache
avail_cache = feature_num;
}
else
{
cache = (double*)malloc(sizeof(double)*avail_cache*row_num);
}
}
// Dynamic memory allocation
if(avail_cache < feature_num)
{
// Allocate the memory to X
X = (double*)malloc(sizeof(double)*row_num);
}
Y = (double*)malloc(sizeof(double)*row_num);
H = (double*)malloc(sizeof(double)*row_num);
model = (double*)malloc(sizeof(double)*feature_num);
// Dynamic allocation for device variables
// H and Y definitely need to be allocated & cached
double* dH;
double* dY;
double* dX;
double* cuda_cache;
double* dmul;
// double* mul;
// mul = (double*)malloc(sizeof(double)*row_num);
size_t pitch;
if(avail_cache < feature_num) {
if(cudaSuccess != cudaMalloc((void**)&dX, row_num * sizeof(double))) {
DM.message("No space on device for single column feature");
exit(1);
}
}
if(cudaSuccess != cudaMalloc((void**)&dY, row_num * sizeof(double))) {
DM.message("No space on device for class labels");
exit(1);
}
if(cudaSuccess != cudaMalloc((void**)&dH, row_num * sizeof(double))) {
DM.message("No space on device for remain variables");
exit(1);
}
if(cudaSuccess != cudaMalloc((void**)&dmul, row_num * sizeof(double))) {
DM.message("No space on device for Intermediate variables");
exit(1);
}
// Note: extra temporary storage needed by the reduction is not accounted for here
// The cache size calculation may need refinement in the future
printf("Available cache columns: %d\n", avail_cache);
if(avail_cache > 0) {
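// cuda_cache is a pitched 2D allocation on the device: one padded row of row_num doubles per cached feature column; pitch is the padded row width in bytes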
if(cudaSuccess != cudaMallocPitch((void**)&cuda_cache, &pitch, row_num * sizeof(double), avail_cache)) {
DM.message("No space on device for variable cache");
} else {
DM.message("Allocate GPU-Cache successfully on GPU");
printf("Malloc width after padding is %zu bytes\n", pitch);
}
}
// double* dF_partial;
// cudaMalloc((void**)&dF_partial, sizeof(double));
// double* dF;
// cudaMalloc((void**)&dF, sizeof(double));
// void *d_temp_storage = NULL;
// size_t temp_storage_bytes = 0;
// cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, dmul, dF_partial, row_num);
// cudaMalloc((void**)&d_temp_storage, temp_storage_bytes);
DM.fetchColumn(fields[1], row_num, Y);
cudaMemcpyAsync(dY, Y, row_num*sizeof(double), cudaMemcpyHostToDevice);
cudaMemsetAsync(dH, 0, row_num*sizeof(double));
printf("model used %s\n", lm);
// Caching & copy data for training
printf("\n");
printf("Avail_col: %d\n", avail_cache);
for(int i = 0; i < avail_cache; i ++)
{
DM.fetchColumn(fields[i+2], row_num, cache + i*row_num);
}
// printf("Test cache data is %lf\n", *(cache + (avail_cache - 1)*row_num));
// Only issue the 2D copy when something was actually cached (cuda_cache and pitch are uninitialized otherwise)
if(avail_cache > 0) {
if(cudaSuccess != cudaMemcpy2DAsync(cuda_cache, pitch, cache, row_num*sizeof(double), row_num*sizeof(double), avail_cache, cudaMemcpyHostToDevice)) {
DM.message("Not enough space on GPU memory for caching features");
} else {
DM.message("Cached feature columns copied to GPU memory");
}
}
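// Wrap the raw device pointer so thrust::reduce can sum the per-row products in dmul directly on the GPU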
thrust::device_ptr<double> wrapped_ptr = thrust::device_pointer_cast(dmul);
// Initialization of variables for loss and gradient
// Transfer data between GPU & CPU, dY label & dH always remains in GPU memory
double F = 0.00;
double F_partial = 0.00;
double r_curr = 0.00;
double r_prev = 0.00;
int iters = 0;
memset(model, 0, sizeof(double)*feature_num);
// Kernel launch parameters
const int threadsPerBlock = 1024;
const int blocksPerGrid = row_num/threadsPerBlock + 1;
// Shuffling process
vector<int> original_index_set;
vector<int> shuffling_index;
for(long i = 0; i < feature_num; i ++)
{
original_index_set.push_back(i);
}
shuffling_index = shuffle(original_index_set, (unsigned)time(NULL));
shuffling_index = original_index_set;
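// Note: the shuffled order is overwritten here, so coordinates are visited in their original order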
// Training loop; note that the per-coordinate branching below may limit performance
do {
c_start = clock();
// Update one coordinate each time
for(int j = 0; j < feature_num; j ++)
{
int cur_index = shuffling_index.at(j);
F_partial = 0.00;
// cudaMemsetAsync(dF_partial, 0, sizeof(double));
cudaDeviceSynchronize();
// If the column corresponding to the current updating coordinate is in the cache, no extra I/O is needed
if(cur_index < avail_cache)
{
if(strcmp("lr", lm) == 0)
G_lrcache<<<blocksPerGrid, threadsPerBlock>>>(dY, dH, cuda_cache, dmul, cur_index, row_num, pitch);
else if(strcmp("lsr", lm) == 0)
G_lsrcache<<<blocksPerGrid, threadsPerBlock>>>(dY, dH, cuda_cache, dmul, cur_index, row_num, pitch);
else if(strcmp("lsvm", lm) == 0)
G_svmcache<<<blocksPerGrid, threadsPerBlock>>>(dY, dH, cuda_cache, dmul, cur_index, row_num, pitch);
cudaDeviceSynchronize();
F_partial = thrust::reduce(thrust::device, wrapped_ptr, wrapped_ptr + row_num);
// cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, dmul, dF_partial, row_num);
// cudaMemcpy(&F_partial, dF_partial,sizeof(double),cudaMemcpyDeviceToHost);
// cudaMemcpy(mul, dmul, sizeof(double)*row_num, cudaMemcpyDeviceToHost);
// #pragma omp parallel for reduction (+:F_partial)
// for(long k = 0; k < row_num; k++)
// F_partial += mul[k];
// printf("F_partial value is %lf\n", F_partial);
}
else
{
// Fetch the column and store the current column into X, not in cache
DM.fetchColumn(fields[cur_index+2], row_num, X);
cudaMemcpy(dX, X, sizeof(double) * row_num, cudaMemcpyHostToDevice);
// Compute the partial gradient
if(strcmp("lr", lm) == 0)
G_lrkl<<<blocksPerGrid, threadsPerBlock>>>(dY, dH, dX, dmul, row_num);
else if(strcmp("lsr", lm) == 0)
G_lsrkl<<<blocksPerGrid, threadsPerBlock>>>(dY, dH, dX, dmul, row_num);
else if(strcmp("lsvm", lm) == 0)
G_svmkl<<<blocksPerGrid, threadsPerBlock>>>(dY, dH, dX, dmul, row_num);
cudaDeviceSynchronize();
F_partial = thrust::reduce(thrust::device, wrapped_ptr, wrapped_ptr + row_num);
// cudaMemcpy(mul, dmul, sizeof(double)*row_num, cudaMemcpyDeviceToHost);
// #pragma omp parallel for reduction (+:F_partial)
// for(long k = 0; k < row_num; k++)
// F_partial += mul[k];
}
// Store the old W(j)
double W_j = model[cur_index];
// Update the current coordinate
model[cur_index] = model[cur_index] - step_size * (F_partial/row_num);
std::cout << "model[" << cur_index << "]: " << model[cur_index] << std::endl;
double diff = model[cur_index] - W_j;
// Update the intermediate variable
// H = H + (Wj - old_Wj)* X(,j)
if( cur_index < avail_cache)
{
H_cache<<<blocksPerGrid, threadsPerBlock>>>(dH, cuda_cache, diff, cur_index, pitch, row_num);
cudaDeviceSynchronize();
}
else
{
Hkl<<<blocksPerGrid, threadsPerBlock>>>(dH, dX, diff, row_num);
cudaDeviceSynchronize();
}
}
r_prev = F;
// Calculate F
F = 0.00;
// cudaMemsetAsync(dF,0,sizeof(double));
if(strcmp("lr", lm) == 0)
G_lrloss<<<blocksPerGrid, threadsPerBlock>>>(dY, dH, dmul, row_num);
else if(strcmp("lsr", lm) == 0)
G_lsrloss<<<blocksPerGrid, threadsPerBlock>>>(dY, dH, dmul, row_num);
else if(strcmp("lsvm", lm) == 0)
G_svmloss<<<blocksPerGrid, threadsPerBlock>>>(dY, dH, dmul, row_num);
cudaDeviceSynchronize();
F = thrust::reduce(thrust::device,wrapped_ptr, wrapped_ptr + row_num);
// cudaMemcpy(mul, dmul, sizeof(double)*row_num, cudaMemcpyDeviceToHost);
// #pragma omp parallel for reduction (+:F)
// for(long k = 0; k < row_num; k++)
// {
// F += mul[k];
// }
// cub::DeviceReduce::Sum(d_temp_storage, temp_storage_bytes, dmul, dF, row_num);
// cudaMemcpy(&F, dF, sizeof(double),cudaMemcpyDeviceToHost);
F = F/(double)row_num;
cout<<"loss: " <<F<<endl;
r_curr = F;
iters ++;
c_end = clock();
cout<<"Iteration "<<iters-1<<" :"<<1000*(c_end-c_start)/CLOCKS_PER_SEC<<"ms\n";
}
while(!stop(iters , r_prev, r_curr, _setting));
free(Y);
free(H);
cudaFree(dY);
cudaFree(dH);
cudaFree(dmul);
if( avail_cache < feature_num ){
free(X);
cudaFree(dX);
}
// Clear the cache
if( avail_cache > 0) {
free(cache);
cudaFree(cuda_cache);
}
printf("\n");
outputResults(r_curr, feature_num, iters, model);
DM.message("Finish materialize");
}
/* oid-oid mapping is Not stored in memory */
void techniques::stream(string table_S, string table_R, setting _setting, double *&model, double avail_mem, const char *lm)
{
DataManagement DM;
DM.message("Start stream");
// Set Timer
clock_t c_start;
clock_t c_end;
c_start = clock();
// Get the table information and column names
vector<long> tableInfo_S(3);
vector<long> tableInfo_R(3);
vector<string> fields_S = DM.getFieldNames(table_S, tableInfo_S);
vector<string> fields_R = DM.getFieldNames(table_R, tableInfo_R);
int feature_num_S = (int)tableInfo_S[1];
int feature_num_R = (int)tableInfo_R[1];
int feature_num = feature_num_S + feature_num_R;
long row_num_S = tableInfo_S[2];
long row_num_R = tableInfo_R[2];
// For Cache
long avail_mem_total = 1024*1024*1024*avail_mem;
long avail_cache;
int avail_col_S = 0;
int avail_col_R = 0;
double **cache_R;
double **cache_S;
// Label array
double *Y;
// Residual vector
double *H;
// Buffer for column reading in S
double *X_S;
// Buffer for column reading in R
double *X_R;
// OID-OID Mapping (Key Foreign-Key Mapping Reference)
double *KKMR;
// Setting
double step_size = _setting.step_size;
// Calculate the available memory measured by size of each column in R and S
avail_cache = avail_mem_total - sizeof(double)*(4*row_num_S + row_num_R);
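// The fixed budget covers Y, H, X_S and KKMR (four columns of length row_num_S) plus the X_R read buffer (row_num_R)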
if(avail_cache < 0)
{
DM.errorMessage("Insufficient memory space");
exit(1);
}
else if(avail_cache == 0)
{
DM.message("No space for caching");
}
else
{
// First consider caching columns in S
avail_col_S = avail_cache/(sizeof(double)*row_num_S);
if(avail_col_S == 0)
{
DM.message("No space for caching S");
// Then consider caching columns in R
avail_col_R = avail_cache/(sizeof(double)*row_num_R);
if(avail_col_R == 0)
{
DM.message("No space for caching R");
}
else
{
if(avail_col_R >= feature_num_R - 1)
{
cache_R = new double*[feature_num_R];
for(int i = 0; i < feature_num_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
// No need to reserve the X_R buffer to read a single column in R
avail_col_R = feature_num_R;
}
else
{
cache_R = new double*[avail_col_R];
for(int i = 0; i < avail_col_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
}
}
}
else
{
if(avail_col_S >= feature_num_S)
{
cache_S = new double*[feature_num_S];
for(int i = 0; i < feature_num_S; i ++)
{
cache_S[i] = new double[row_num_S];
}
// X_S is still needed to "reconstruct" the complete column from single column fetched from R
avail_col_S = feature_num_S;
}
else
{
cache_S = new double*[avail_col_S];
for(int i = 0; i < avail_col_S; i ++)
{
cache_S[i] = new double[row_num_S];
}
}
// Then consider the caching for R using the remaining caching space
avail_cache = avail_cache - avail_col_S*sizeof(double)*row_num_S;
avail_col_R = avail_cache/(sizeof(double)*row_num_R);
if(avail_col_R == 0)
{
DM.message("No space for caching R");
}
else
{
if(avail_col_R >= feature_num_R - 1)
{
cache_R = new double*[feature_num_R];
for(int i = 0; i < feature_num_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
// No need to reserve the X_R buffer to read a single column in R
avail_col_R = feature_num_R;
}
else
{
cache_R = new double*[avail_col_R];
for(int i = 0; i < avail_col_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
}
}
}
}
// Dynamic memory allocation
if(avail_col_R < feature_num_R)
{
X_R = new double[row_num_R];
}
X_S = new double[row_num_S];
Y = new double[row_num_S];
H = new double[row_num_S];
KKMR = new double[row_num_S];
model = new double[feature_num];
// Initialization of variables for loss and gradient
double F = 0.00;
double F_partial = 0.00;
double r_curr = 0.00;
double r_prev = 0.00;
int iters = 0;
// Initialization
// model and H are arrays of double, so size the memset with sizeof(double)
memset(model, 0, sizeof(double)*feature_num);
memset(H, 0, sizeof(double)*row_num_S);
DM.fetchColumn(fields_S[1], row_num_S, Y);
// Shuffling process
vector<int> original_index_set;
vector<int> shuffling_index;
// Initialize the original_index_set
for(int i = 0; i < feature_num; i ++)
{
original_index_set.push_back(i);
}
shuffling_index = shuffle(original_index_set, (unsigned)time(NULL));
// Caching S
printf("\n");
printf("Avail_col_S: %d\n", avail_col_S);
for(int i = 0; i < avail_col_S; i ++)
{
//printf("Cache %d th column in S\n", i);
DM.fetchColumn(fields_S[3+i], row_num_S, cache_S[i]);
}
// Caching R
printf("\n");
printf("Avail_col_R: %d\n", avail_col_R);
for(int k = 0; k < avail_col_R; k ++)
{
//printf("Cache %d th column in R\n", k);
DM.fetchColumn(fields_R[1+k],row_num_R, cache_R[k]);
}
c_end = clock();
cout<<"Caching:"<<1000.0*(c_end-c_start)/CLOCKS_PER_SEC<<"ms\n";
do
{
c_start = clock();
printf("\n");
DM.message("Start fetching KKMR reference");
// Read the fk column(referred rid in R) in table S, rid column in R
ifstream fk;
// Load the fk to KKMR
fk.open(fields_S[2], ios::in | ios::binary);
// rid.open(table2_fields[0], ios::in | ios::binary);
if(!fk.is_open())
{
DM.errorMessage("Error Message: Cannot load the fk column.");
exit(1);
}
fk.read((char *)KKMR, row_num_S*(sizeof(double)));
fk.close();
DM.message("Finish fetching KKMR reference");
// Update one coordinate each time
for(int j = 0; j < feature_num; j ++)
{
int cur_index = shuffling_index.at(j);
//printf("Current feature index: %d\n", cur_index);
F_partial = 0.00;
if(cur_index < feature_num_S)
{
// Check cache for S
if(cur_index < avail_col_S)
{
// Compute the partial gradient
for(long i = 0; i < row_num_S; i ++)
{
F_partial += gradientCompute(Y[i],H[i],lm)*cache_S[cur_index][i];
}
}
else
{
// Fetch the corresponding column in S and store in X_S
DM.fetchColumn(fields_S[3+cur_index], row_num_S, X_S);
// Compute the partial gradient
for(long i = 0; i < row_num_S; i ++)
{
F_partial += gradientCompute(Y[i],H[i], lm)*X_S[i];
}
}
}
else
{
// Check cache for R
int col_index_R = cur_index - feature_num_S;
//printf("col_index_R: %d\n", col_index_R);
if(col_index_R < avail_col_R)
{
for(long m = 0; m < row_num_S; m ++)
{
long fk = KKMR[m];
X_S[m]= cache_R[col_index_R][fk-1];
}
}
else
{
DM.fetchColumn(fields_R[1+col_index_R], row_num_R, X_R);
for(long m = 0; m < row_num_S; m ++)
{
long fk = KKMR[m];
X_S[m]= X_R[fk-1];
}
}
// Compute the partial gradient
for(long i = 0; i < row_num_S; i ++)
{
F_partial += gradientCompute(Y[i],H[i],lm)*X_S[i];
}
}
// Store the old W(j)
double W_j = model[cur_index];
// Update the current coordinate
model[cur_index] = model[cur_index] - step_size * F_partial;
double diff = model[cur_index] - W_j;
//Update the intermediate variable
//H = H + (Wj - old_Wj)* X(,j)
if( cur_index < avail_col_S)
{
for(long m = 0; m < row_num_S; m ++ )
{
H[m] = H[m] + diff*cache_S[cur_index][m];
}
}
else
{
for(long m = 0; m < row_num_S; m ++ )
{
H[m] = H[m] + diff*X_S[m];
}
}
}
r_prev = F;
// Calculate F
F = 0.00;
for(long i = 0; i < row_num_S; i ++)
{
double tmp = lossCompute(Y[i],H[i],lm);
F += tmp;
}
r_curr = F;
iters ++;
c_end = clock();
cout<<"Iteration:"<<1000.0*(c_end-c_start)/CLOCKS_PER_SEC<<"ms\n";
}
while(!stop(iters , r_prev, r_curr, _setting));
delete [] Y;
delete [] H;
delete [] X_S;
delete [] KKMR;
if(avail_col_R < feature_num_R)
{
delete [] X_R;
}
// Clear the cache
if(avail_col_S > 0)
{
for(int i = 0; i < avail_col_S; i ++)
{
delete [] cache_S[i];
}
delete [] cache_S;
}
if(avail_col_R > 0)
{
for(int i = 0; i < avail_col_R; i ++)
{
delete [] cache_R[i];
}
delete [] cache_R;
}
printf("\n");
outputResults(r_curr, feature_num, iters, model);
DM.message("Finish stream");
}
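// The loop above rebuilds each joined column on the fly by following the KKMR
// foreign-key mapping into R. A minimal standalone sketch of that gather step is given
// below for clarity; the helper name and signature are hypothetical (nothing in the
// original code calls it), and fk values are assumed to be 1-based row ids into R, as in
// the loops above.
static void gatherJoinedColumn(const double *KKMR, const double *R_col, long row_num_S, double *joined_col)
{
for(long m = 0; m < row_num_S; m ++)
{
long fk = (long)KKMR[m]; // 1-based row id into R
joined_col[m] = R_col[fk - 1];
}
}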
void techniques::factorize(string table_S, string table_R, setting _setting, double *&model, double avail_mem, const char *lm)
{
DataManagement DM;
DM.message("Start factorize");
// Set Timer
clock_t c_start;
clock_t c_end;
c_start = clock();
// Get the table information and column names
vector<long> tableInfo_S(3);
vector<long> tableInfo_R(3);
vector<string> fields_S = DM.getFieldNames(table_S, tableInfo_S);
vector<string> fields_R = DM.getFieldNames(table_R, tableInfo_R);
int feature_num_S = (int)tableInfo_S[1];
int feature_num_R = (int)tableInfo_R[1];
int feature_num = feature_num_S + feature_num_R;
long row_num_S = tableInfo_S[2];
long row_num_R = tableInfo_R[2];
// For Cache
long avail_mem_total = 1024*1024*1024*avail_mem;
long avail_cache;
int avail_col_S = 0;
int avail_col_R = 0;
double **cache_R;
double **cache_S;
// Label array
double *Y;
// Residual vector
double *H;
// Buffer for column reading in S
double *X_S;
// Buffer for column reading in R
double *X_R;
// Buffer to store factorized factor when considering column R
double *X_R_f;
// OID-OID Mapping (Key Foreign-Key Mapping Reference, to be kept in memory)
double *KKMR;
// Setting
double step_size = _setting.step_size;
// Calculate the available memory measured by size of each column in R and S
avail_cache = avail_mem_total - sizeof(double)*(4*row_num_S + 2*row_num_R);
if(avail_cache < 0)
{
DM.errorMessage("Insufficient memory space");
exit(1);
}
else if(avail_cache == 0)
{
DM.message("No space for caching");
}
else
{
// First consider caching columns in S
avail_col_S = avail_cache/(sizeof(double)*row_num_S);
if(avail_col_S == 0)
{
DM.message("No space for caching S");
// X_S is allocated once in the common path below; allocating it here as well would leak
// Then consider caching columns in R
avail_col_R = avail_cache/(sizeof(double)*row_num_R);
if(avail_col_R == 0)
{
DM.message("No space for caching R");
}
else
{
if(avail_col_R >= feature_num_R - 1)
{
cache_R = new double*[feature_num_R];
for(int i = 0; i < feature_num_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
// No need to reserve the X_R buffer to read a single column in R
avail_col_R = feature_num_R;
}
else
{
cache_R = new double*[avail_col_R];
for(int i = 0; i < avail_col_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
}
}
}
else
{
if(avail_col_S >= (feature_num_S - 1))
{
cache_S = new double*[feature_num_S];
for(int i = 0; i < feature_num_S; i ++)
{
cache_S[i] = new double[row_num_S];
}
//No need to reserve X_S for single column reading
avail_col_S = feature_num_S;
}
else
{
cache_S = new double*[avail_col_S];
for(int i = 0; i < avail_col_S; i ++)
{
cache_S[i] = new double[row_num_S];
}
}
//Then consider the caching for R using the remaining caching space
if(avail_col_S == feature_num_S)
{
avail_cache = avail_cache - (avail_col_S - 1)*sizeof(double)*row_num_S;
}
else
{
avail_cache = avail_cache - avail_col_S*sizeof(double)*row_num_S;
}
avail_col_R = avail_cache/(sizeof(double)*row_num_R);
if(avail_col_R == 0)
{
DM.message("No space for caching R");
}
else
{
if(avail_col_R >= feature_num_R - 1)
{
cache_R = new double*[feature_num_R];
for(int i = 0; i < feature_num_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
// No need to reserve the X_R buffer to read a single column in R
avail_col_R = feature_num_R;
}
else
{
cache_R = new double*[avail_col_R];
for(int i = 0; i < avail_col_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
}
}
}
}
// Dynamic memory allocation
if(avail_col_S < feature_num_S)
{
X_S = new double[row_num_S];
}
if(avail_col_R < feature_num_R)
{
X_R = new double[row_num_R];
}
model = new double[feature_num];
Y = new double[row_num_S];
H = new double[row_num_S];
X_R_f = new double[row_num_R];
KKMR = new double[row_num_S];
// Initialization of variables for loss and gradient
double F = 0.00;
double F_partial = 0.00;
double r_curr = 0.00;
double r_prev = 0.00;
int iters = 0;
// Initialization
// model and H are arrays of double, so size the memset with sizeof(double)
memset(model, 0, sizeof(double)*feature_num);
memset(H, 0, sizeof(double)*row_num_S);
DM.fetchColumn(fields_S[1], row_num_S, Y);
printf("\n");
DM.message("Start fetching KKMR reference");
// Read the fk column(referred rid in R) in table S, rid column in R
ifstream fk;
// Load the fk to KKMR
fk.open(fields_S[2], ios::in | ios::binary);
// rid.open(table2_fields[0], ios::in | ios::binary);
if(!fk.is_open())
{
DM.errorMessage("Error Message: Cannot load the fk column.");
exit(1);
}
fk.read((char *)KKMR, row_num_S*(sizeof(double)));
fk.close();
DM.message("Finished fetching KKMR reference");
vector<int> original_index_set;
vector<int> shuffling_index;
// Initialize the original_index_set
for(int i = 0; i < feature_num; i ++)
{
original_index_set.push_back(i);
}
// Shuffling
shuffling_index = shuffle(original_index_set, (unsigned)time(NULL));
// Caching S
printf("\n");
printf("Avail_col_S: %d\n", avail_col_S);
for(int i = 0; i < avail_col_S; i ++)
{
// printf("Cache %d th column in S\n", i);
DM.fetchColumn(fields_S[3+i], row_num_S, cache_S[i]);
}
// Caching R
printf("\n");
printf("Avail_col_R: %d\n", avail_col_R);
for(int k = 0; k < avail_col_R; k ++)
{
//printf("Cache %d th column in R\n", k);
DM.fetchColumn(fields_R[1+k],row_num_R, cache_R[k]);
}
c_end = clock();
cout<<"Caching:"<<1000.0*(c_end-c_start)/CLOCKS_PER_SEC<<"ms\n";
do
{
c_start = clock();
// Update one coordinate each time
for(int j = 0; j < feature_num; j ++)
{
int cur_index = shuffling_index.at(j);
//printf("Current feature index: %d\n", cur_index);
F_partial = 0.00;
if(cur_index < feature_num_S)
{
// Check cache for S
if(cur_index < avail_col_S)
{
// Compute the partial gradient
for(long i = 0; i < row_num_S; i ++)
{
F_partial += gradientCompute(Y[i],H[i],lm)*cache_S[cur_index][i];
}
}
else
{
// Fetch the corresponding column in S and store in X_S
DM.fetchColumn(fields_S[3+cur_index], row_num_S, X_S);
// Compute the partial gradient
for(long i = 0; i < row_num_S; i ++)
{
F_partial += gradientCompute(Y[i],H[i],lm)*X_S[i];
}
}
// Store the old W(j)
double W_j = model[cur_index];
// Update the current coordinate
model[cur_index] = model[cur_index] - step_size * F_partial;
double diff = model[cur_index] - W_j;
// Update the intermediate variable
// H = H + (Wj - old_Wj)* X(,j)
if(cur_index < avail_col_S)
{
for(long m = 0; m < row_num_S; m ++ )
{
H[m] = H[m] + diff*cache_S[cur_index][m];
}
}
else{
for(long m = 0; m < row_num_S; m ++ )
{
H[m] = H[m] + diff*X_S[m];
}
}
}
else
{
// X_R_f holds doubles: zero the full buffer
memset(X_R_f, 0, sizeof(double)*row_num_R);
// Check cache for R
int col_index_R = cur_index - feature_num_S;
// Compute the factorized factor
for(long m = 0; m < row_num_S; m ++)
{
long fk = KKMR[m];
X_R_f[fk-1] += gradientCompute(Y[m],H[m],lm);
}
if(col_index_R < avail_col_R)
{
// Compute the partial gradient
for(long k = 0; k < row_num_R; k ++)
{
F_partial += cache_R[col_index_R][k]*X_R_f[k];
}
}
else
{
DM.fetchColumn(fields_R[1+col_index_R], row_num_R, X_R);
for(long k = 0; k < row_num_R; k ++)
{
F_partial += X_R[k]*X_R_f[k];
}
}
// Store the old W(j)
double W_j = model[cur_index];
// Update the current coordinate
model[cur_index] = model[cur_index] - step_size * F_partial;
double diff = model[cur_index] - W_j;
// Factorized computation
if(col_index_R < avail_col_R)
{
for(long k = 0; k < row_num_R; k ++)
{
X_R_f[k] = diff*cache_R[col_index_R][k];
}
}
else
{
for(long k = 0; k < row_num_R; k ++)
{
X_R_f[k] = diff*X_R[k];
}
}
// Update the intermediate variable
// H = H + (Wj - old_Wj)* X(,j)
for(long m = 0; m < row_num_S; m ++ )
{
long fk = KKMR[m];
H[m] = H[m] + X_R_f[fk-1];
}
}
}
r_prev = F;
// Calculate F
F = 0.00;
for(long i = 0; i < row_num_S; i ++)
{
double tmp = lossCompute(Y[i],H[i],lm);
F += tmp;
}
r_curr = F;
iters ++;
c_end = clock();
cout<<"Iteration:"<<1000.0*(c_end-c_start)/CLOCKS_PER_SEC<<" ms\n";
}
while(!stop(iters, r_prev, r_curr, _setting));
delete [] Y;
delete [] H;
delete [] X_R_f;
delete [] KKMR;
if(avail_col_S < feature_num_S)
{
delete [] X_S;
}
if(avail_col_R < feature_num_R)
{
delete [] X_R;
}
// Clear the cache
if(avail_col_S > 0)
{
for(int i = 0; i < avail_col_S; i ++)
{
delete [] cache_S[i];
}
delete [] cache_S;
}
if(avail_col_R > 0)
{
for(int i = 0; i < avail_col_R; i ++)
{
delete [] cache_R[i];
}
delete [] cache_R;
}
printf("\n");
outputResults(r_curr, feature_num, iters, model);
DM.message("Finish factorize");
}
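// factorize() avoids expanding an R column into row_num_S joined rows: the per-row
// gradients are first accumulated per R-row through the KKMR mapping (the X_R_f buffer)
// and then dotted with the R column once, using
// sum_m g[m]*R_col[fk(m)-1] = sum_k ( sum_{m: fk(m)=k+1} g[m] ) * R_col[k].
// A minimal sketch of that inner product follows; the helper is hypothetical and is not
// called anywhere in the original code.
static double factorizedPartialGradient(const double *g, const double *KKMR, long row_num_S,
const double *R_col, long row_num_R, double *scratch)
{
for(long k = 0; k < row_num_R; k ++) scratch[k] = 0.0;
for(long m = 0; m < row_num_S; m ++)
{
long fk = (long)KKMR[m];
scratch[fk - 1] += g[m]; // group gradients by the referenced R row
}
double F_partial = 0.0;
for(long k = 0; k < row_num_R; k ++) F_partial += R_col[k] * scratch[k];
return F_partial;
}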
#pragma mark - Block Coordinate Descent
void techniques::materializeBCD(string table_T, setting _setting, double *&model, int block_size, double avail_mem, const char *lm)
{
DataManagement DM;
DM.message("Start materializeBCD");
// Get the table information and column names
vector<long> tableInfo(3);
vector<string> fields = DM.getFieldNames(table_T, tableInfo);
int feature_num = (int)tableInfo[1];
long row_num = tableInfo[2];
// Block Info
int block_num = feature_num/block_size;
int block_residual = feature_num%block_size;
block_num = block_residual > 0 ? (block_num + 1) : block_num;
// For cache
long avail_mem_total = 1024*1024*1024*avail_mem;
int avail_col = 0;
int avail_cache = 0;
double **cache;
// Label Array
double *Y;
// Residual Vector
double *H;
// Buffer for column reading
double *X;
// Additional columns space reserved for gradient computation
double *G;
double *difference;
// Setting
double step_size = _setting.step_size;
// Calculate the available memory measured by size of each column
avail_col = avail_mem_total/(sizeof(double)*row_num);
// Calculate the available remaining space for cache
avail_cache = avail_col - 5;
if(avail_cache < 0)
{
DM.errorMessage("Insufficient memory space");
exit(1);
}
else if (avail_cache == 0)
{
DM.message("No space for caching");
}
else
{
if( avail_cache >= feature_num - 1 )
{
cache = new double*[feature_num];
for(int i = 0; i < feature_num; i ++)
{
cache[i] = new double[row_num];
}
//No need to reserve the X buffer to read single column
avail_cache = feature_num;
}
else
{
cache = new double*[avail_cache];
for(int i = 0; i < avail_cache; i ++)
{
cache[i] = new double[row_num];
}
}
}
// Dynamic memory allocation
if(avail_cache < feature_num)
{
// Allocate the memory to X
X = new double[row_num];
}
Y = new double[row_num];
H = new double[row_num];
G = new double[row_num];
difference = new double[row_num];
model = new double[feature_num];
// Initialization of variables for loss and gradient
double F = 0.00;
double F_partial[block_size];
// Initialize the partial graident for every block
for(int i = 0; i < block_size; i ++)
{
F_partial[i] = 0.00;
}
double r_curr = 0.00;
double r_prev = 0.00;
int iters = 0;
for(int i = 0; i < feature_num; i ++)
{
model[i] = 0.00;
}
for(long i = 0; i < row_num; i ++)
{
H[i] = 0.00;
G[i] = 0.00;
difference[i] = 0.00;
}
DM.fetchColumn(fields[1], row_num, Y);
// Two level shuffling: first shuffling all columns, then all blocks
vector<int> original_index;
vector<int> shuffling_index;
vector<int> original_block_index;
vector<int> shuffling_block_index;
// Initialize the original_index_set
for(int i = 0; i < feature_num; i ++)
{
original_index.push_back(i);
}
for(int i = 0; i < block_num; i ++)
{
original_block_index.push_back(i);
}
// Shuffling
shuffling_index = shuffle(original_index, (unsigned)time(NULL));
shuffling_block_index = shuffle(original_block_index, (unsigned)time(NULL));
// Print the shuffling_index and shuffling_block_index
/**
printf("After shuffling, the feature indexes:\n");
for(int i = 0; i < feature_num; i ++)
{
printf("[%d]\n",shuffling_index.at(i));
}
printf("After shuffling, the block indexes:\n");
for(int i = 0; i < block_num; i ++)
{
printf("[%d]\n",shuffling_block_index.at(i));
}
**/
// Caching
printf("\n");
printf("Avail_col: %d\n", avail_cache);
for(int i = 0; i < avail_cache; i ++)
{
printf("Cache %d th column\n", i);
DM.fetchColumn(fields[i+2],row_num, cache[i]);
}
do
{
// Update one "block" each time
// "Cumulative" difference in H caused by block
for(int j = 0; j < block_num; j ++)
{
int cur_block_index = shuffling_block_index.at(j);
//printf("Current_block_index: %d\n",cur_block_index);
int cur_block_size = 0;
//Check whether the current block is the "residual"
if( (cur_block_index == block_num - 1) && block_residual > 0 )
{
cur_block_size = block_residual;
}
else
{
cur_block_size = block_size;
}
for(long d = 0; d < row_num; d ++)
{
difference[d] = 0.00;
}
// Start with "first level" block index
int block_start_index= 0;
// Double indexing: here, the index is the "index" of the "real index"
// Update each 'block' by starting with getting the block index
block_start_index = cur_block_index*block_size;
//printf("Block_start_index: %d\n",shuffling_index.at(block_start_index));
// First calculate the statistics used for gradient
for(long g = 0; g < row_num; g ++)
{
G[g] = gradientCompute(Y[g],H[g],lm);
}
for(int b = 0; b < cur_block_size; b ++)
{
int cur_index = shuffling_index.at(block_start_index+b);
//printf("Current feature index: %d\n", cur_index);
F_partial[b] = 0.00;
// Check for Cache
if(cur_index < avail_cache)
{
// Compute the partial gradient from cache
for(long i = 0; i < row_num ; i ++)
{
F_partial[b] += G[i]*cache[cur_index][i];
}
}
else
{
// Fetch the column and store the current column into X
DM.fetchColumn(fields[cur_index+2], row_num, X);
// Compute the partial gradient
for(long i = 0; i < row_num ; i ++)
{
F_partial[b] += G[i]*X[i];
}
}
// Store the old W(j)
int cur_model_index = cur_index;
double diff = model[cur_model_index];
// Update the current coordinate
model[cur_model_index] = model[cur_model_index] - step_size * F_partial[b];
// Compute the difference on current coordinate
diff = model[cur_model_index] - diff;
// Update the cumulative difference
if(cur_index < avail_cache)
{
for(long m = 0; m < row_num; m ++)
{
difference[m] += diff*cache[cur_index][m];
}
}
else
{
for(long m = 0; m < row_num; m ++)
{
difference[m] += diff*X[m];
}
}
}
for(long m = 0; m < row_num; m ++ )
{
H[m] = H[m] + difference[m];
}
}
r_prev = F;
// Calculate F
F = 0.00;
for(long i = 0; i < row_num ; i ++)
{
double tmp = lossCompute(Y[i],H[i], lm);
F += tmp;
}
r_curr = F;
iters ++;
}
while(!stop(iters, r_prev, r_curr, _setting));
delete [] Y;
delete [] H;
delete [] G;
delete [] difference;
if(avail_cache < feature_num)
{
delete [] X;
}
// Clear the cache
if(avail_cache > 0)
{
for(int i = 0; i < avail_cache; i ++)
{
delete [] cache[i];
}
delete [] cache;
}
printf("\n");
outputResults(r_curr, feature_num, iters, model);
DM.message("Finish materializeBCD");
}
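// The BCD variants use double indexing: block b covers positions [b*block_size,
// b*block_size + cur_block_size) of the shuffled permutation, and each position is mapped
// through shuffling_index to the real feature id. A small sketch of that mapping follows
// (hypothetical helper, not used by the original code):
static vector<int> featuresOfBlock(const vector<int> &shuffling_index, int block_index, int block_size, int feature_num)
{
vector<int> features;
int start = block_index * block_size;
int end = start + block_size;
if(end > feature_num) end = feature_num; // the last block may be the shorter "residual" block
for(int pos = start; pos < end; pos ++)
{
features.push_back(shuffling_index.at(pos));
}
return features;
}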
void techniques::factorizeBCD(string table_S, string table_R, setting _setting, double *&model, int block_size, double avail_mem, const char *lm)
{
DataManagement DM;
DM.message("Start factorizeBCD");
// Get the table information and column names
vector<long> tableInfo_S(3);
vector<long> tableInfo_R(3);
vector<string> fields_S = DM.getFieldNames(table_S, tableInfo_S);
vector<string> fields_R = DM.getFieldNames(table_R, tableInfo_R);
int feature_num_S = (int)tableInfo_S[1];
int feature_num_R = (int)tableInfo_R[1];
int feature_num = feature_num_S + feature_num_R;
long row_num_S = tableInfo_S[2];
long row_num_R = tableInfo_R[2];
// Block Info
int block_num = feature_num/block_size;
int block_residual = feature_num%block_size;
block_num = block_residual > 0 ? (block_num + 1) : block_num;
// For Cache
long avail_mem_total = 1024*1024*1024*avail_mem;
long avail_cache = 0;
int avail_col_S = 0;
int avail_col_R = 0;
double **cache_R;
double **cache_S;
// Label array
double *Y;
// Residual vector
double *H;
// Buffer for column reading in S
double *X_S;
// Buffer for column reading in R
double *X_R;
// Buffer to store factorized factor when considering column R
double *X_R_f;
// OID-OID Mapping (Key Foreign-Key Mapping Reference, to be kept in memory)
double *KKMR;
// Additional column space reserved for gradient computation
double *G;
double *difference;
// Setting
double step_size = _setting.step_size;
// Calculate the available memory measured by size of each column in R and S
avail_cache = avail_mem_total - sizeof(double)*(6*row_num_S + 2*row_num_R);
if(avail_cache < 0)
{
DM.errorMessage("Insufficient memory space");
exit(1);
}
else if(avail_cache == 0)
{
DM.message("No space for caching");
}
else
{
// First consider caching columns in S
avail_col_S = avail_cache/(sizeof(double)*row_num_S);
if(avail_col_S == 0)
{
DM.message("No space for caching S");
// Then consider caching columns in R
avail_col_R = avail_cache/(sizeof(double)*row_num_R);
if(avail_col_R == 0)
{
DM.message("No space for caching R");
}
else
{
if(avail_col_R >= feature_num_R - 1)
{
cache_R = new double*[feature_num_R];
for(int i = 0; i < feature_num_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
// No need to reserve the X_R buffer to read a single column in R
avail_col_R = feature_num_R;
}
else
{
cache_R = new double*[avail_col_R];
for(int i = 0; i < avail_col_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
}
}
}
else
{
if(avail_col_S >= feature_num_S)
{
cache_S = new double*[feature_num_S];
for(int i = 0; i < feature_num_S; i ++)
{
cache_S[i] = new double[row_num_S];
}
// No need to reserve X_S for single column reading
avail_col_S = feature_num_S;
}
else
{
// X_S is allocated once in the common path below; allocating it here as well would leak
cache_S = new double*[avail_col_S];
for(int i = 0; i < avail_col_S; i ++)
{
cache_S[i] = new double[row_num_S];
}
}
// Then consider the caching for R using the remaining caching space
if(avail_col_S == feature_num_S)
{
avail_cache = avail_cache - (avail_col_S - 1)*sizeof(double)*row_num_S;
}
else
{
avail_cache = avail_cache - avail_col_S*sizeof(double)*row_num_S;
}
avail_col_R = avail_cache/(sizeof(double)*row_num_R);
if(avail_col_R == 0)
{
DM.message("No space for caching R");
}
else
{
if(avail_col_R >= feature_num_R - 1)
{
cache_R = new double*[feature_num_R];
for(int i = 0; i < feature_num_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
//No need to reserve the X_R buffer to read a single column in R
avail_col_R = feature_num_R;
}
else
{
cache_R = new double*[avail_col_R];
for(int i = 0; i < avail_col_R; i ++)
{
cache_R[i] = new double[row_num_R];
}
}
}
}
}
// Dynamic memory allocation
if(avail_col_S < feature_num_S)
{
X_S = new double[row_num_S];
}
if(avail_col_R < feature_num_R)
{
X_R = new double[row_num_R];
}
Y = new double[row_num_S];
H = new double[row_num_S];
X_R_f = new double[row_num_R];
G = new double[row_num_S];
difference = new double[row_num_S];
KKMR = new double[row_num_S];
model = new double[feature_num];
// Initialization of variables for loss and gradient
double F = 0.00;
double F_partial[block_size];
double r_curr = 0.00;
double r_prev = 0.00;
int iters = 0;
// Initialize the partial graident for every block
for(int i = 0; i < block_size; i ++)
{
F_partial[i] = 0.00;
}
// Initialization
for(int i = 0; i < feature_num; i ++)
{
model[i] = 0.00;
}
for(long i = 0; i < row_num_S; i ++)
{
H[i] = 0.00;
G[i] = 0.00;
difference[i] = 0.00;
}
for(long i = 0; i < row_num_R; i ++)
{
X_R_f[i] = 0.00;
}
DM.fetchColumn(fields_S[1], row_num_S, Y);
printf("\n");
DM.message("Start fetching KKMR reference");
// Read the fk column(referred rid in R) in table S, rid column in R
ifstream fk;
// Load the fk to KKMR
fk.open(fields_S[2], ios::in | ios::binary);
// rid.open(table2_fields[0], ios::in | ios::binary);
if(!fk.is_open())
{
DM.errorMessage("Error Message: Cannot load the fk column.");
exit(1);
}
fk.read((char *)KKMR, row_num_S*(sizeof(double)));
fk.close();
DM.message("Finished fetching KKMR reference");
//Two level shuffling: first shuffling all columns, then all blocks
vector<int> original_index;
vector<int> shuffling_index;
vector<int> original_block_index;
vector<int> shuffling_block_index;
// Initialize the original_index_set
for(int i = 0; i < feature_num; i ++)
{
original_index.push_back(i);
}
for(int i = 0; i < block_num; i ++)
{
original_block_index.push_back(i);
}
// Shuffling
shuffling_index = shuffle(original_index, (unsigned)time(NULL));
shuffling_block_index = shuffle(original_block_index, (unsigned)time(NULL));
// Print the shuffling_index and shuffling_block_index
/**
printf("After shuffling, the feature indexes:\n");
for(int i = 0; i < feature_num; i ++)
{
printf("[%d]\n",shuffling_index.at(i));
}
//printf("After shuffling, the block indexes:\n");
for(int i = 0; i < block_num; i ++)
{
printf("[%d]\n",shuffling_block_index.at(i));
}
**/
// Caching S
printf("\n");
printf("Avail_col_S: %d\n", avail_col_S);
for(int i = 0; i < avail_col_S; i ++)
{
printf("Cache %d th column in S\n", i);
DM.fetchColumn(fields_S[3+i], row_num_S, cache_S[i]);
}
// Caching R
printf("\n");
printf("Avail_col_R: %d\n", avail_col_R);
for(int k = 0; k < avail_col_R; k ++)
{
printf("Cache %d th column in R\n", k);
DM.fetchColumn(fields_R[1+k],row_num_R, cache_R[k]);
}
do
{
// Update one "block" each time
// "Cumulative" difference in H caused by block
for(int j = 0; j < block_num; j ++)
{
int cur_block_index = shuffling_block_index.at(j);
//printf("Current_block_index: %d\n",cur_block_index);
int cur_block_size = 0;
//Check whether the current block is the "residual"
if( (cur_block_index == block_num - 1) && block_residual > 0 )
{
cur_block_size = block_residual;
}
else
{
cur_block_size = block_size;
}
for(long d = 0; d < row_num_S; d ++)
{
difference[d] = 0.00;
}
// Start with "first level" block index
int block_start_index= 0;
// Double indexing: here, the index is the "index" of the "real index"
// Update each 'block' by starting with getting the block index
block_start_index = cur_block_index*block_size;
//printf("Block_start_index: %d\n", shuffling_index.at(block_start_index));
// First calculate the statistics used for gradient
for(long g = 0; g < row_num_S; g ++)
{
G[g] = gradientCompute(Y[g],H[g],lm);
}
for(int b = 0; b < cur_block_size; b ++)
{
int cur_index = shuffling_index.at(block_start_index + b);
//printf("Current feature index: %d\n", cur_index);;
F_partial[b] = 0.00;
// Check whether the column is in table R. If it is, applied factorized learning
if(cur_index < feature_num_S)
{
// Check cache for S
if(cur_index < avail_col_S)
{
// Compute the partial gradient
for(long i = 0; i < row_num_S; i ++)
{
F_partial[b] += G[i]*cache_S[cur_index][i];
}
}
else
{
// Fetch each column and store the column into X
DM.fetchColumn(fields_S[cur_index+3], row_num_S, X_S);
// Compute the partial gradient
for(long i = 0; i < row_num_S; i ++)
{
F_partial[b] += G[i]*X_S[i];
}
}
// Store the old Wj
int cur_model_index = cur_index;
double W_j = model[cur_model_index];
// Update the current coordinate
model[cur_model_index] = model[cur_model_index] - step_size * F_partial[b];
// Compute the difference
double diff = model[cur_model_index] - W_j;
// Update the cumulative difference
if(cur_index < avail_col_S)
{
for(long m = 0; m < row_num_S; m ++)
{
difference[m] += diff*cache_S[cur_index][m];
}
}
else
{
for(long m = 0; m < row_num_S; m ++)
{
difference[m] += diff*X_S[m];
}
}
}
else
{
for(long i = 0; i < row_num_R; i ++)
{
X_R_f[i] = 0.00;
}
// Check cache for R
int col_index_R = cur_index - feature_num_S;
//printf("col_index_R: %d\n",col_index_R);
// Apply factorized learning to gradient computation
for(long m = 0; m < row_num_S; m ++)
{
long fk = KKMR[m];
X_R_f[fk-1] += G[m];
}
if(col_index_R < avail_col_R)
{
for(long j = 0; j < row_num_R; j ++)
{
F_partial[b] += cache_R[col_index_R][j]*X_R_f[j];
}
}
else
{
// Fetch the corresponding column in R
DM.fetchColumn(fields_R[1+col_index_R],row_num_R, X_R);
for(long j = 0; j < row_num_R; j ++)
{
F_partial[b] += X_R[j]*X_R_f[j];
}
}
int cur_model_index = cur_index;
double W_j = model[cur_model_index];
model[cur_model_index] = model[cur_model_index] - step_size * F_partial[b];
double diff = model[cur_model_index] - W_j;
// Apply factorized learning to difference (of model/coordinate) computation
if(col_index_R < avail_col_R)
{
for(int i = 0; i < row_num_R; i ++ )
{
X_R_f[i] = diff*cache_R[col_index_R][i];
}
}
else
{
for(int i = 0; i < row_num_R; i ++ )
{
X_R_f[i] = diff*X_R[i];
}
}
for(long m = 0; m < row_num_S; m ++)
{
long fk = KKMR[m];
difference[m] += X_R_f[fk-1];
}
}
}
for(long m = 0; m < row_num_S; m ++)
{
H[m] = H[m] + difference[m];
}
}
r_prev = F;
// Calculate F
F = 0.00;
for(long i = 0; i < row_num_S; i ++)
{
double tmp = lossCompute(Y[i],H[i],lm);
F += tmp;
}
r_curr = F;
iters ++;
}
while(!stop(iters, r_prev, r_curr, _setting));
delete [] Y;
delete [] H;
delete [] X_R_f;
delete [] KKMR;
delete [] G;
delete [] difference;
if(avail_col_S < feature_num_S)
{
delete [] X_S;
}
if(avail_col_R < feature_num_R)
{
delete [] X_R;
}
// Clear Cache
if(avail_col_R > 0)
{
for(int i = 0; i < avail_col_R; i ++)
{
delete [] cache_R[i];
}
delete [] cache_R;
}
if(avail_col_S > 0)
{
for(int i = 0; i < avail_col_S; i ++)
{
delete [] cache_S[i];
}
delete [] cache_S;
}
printf("\n");
outputResults(r_curr, feature_num, iters, model);
DM.message("Finish factorizeBCD");
}
#pragma mark - Gradient descent
/*
Read a single file whose columns are laid out as: id, label, features
The offset (bias) entry W0 is not considered for now
Logistic regression only for now
*/
// Specific techniques selection: flag (for generalization purpose)
// Stochastic Gradient Descent
void techniques::SGD(vector< vector<double> > data, setting _setting, double *&model, int feature_num)
{
DataManagement::message("Start SGD");
long data_size = data.size();
vector<long> original_index_set;
vector<long> shuffling_index;
//Initialize the original_index_set
std::cout << "Start building the index set" << std::endl;
for(long i = 0; i < data_size; i ++)
{
original_index_set.push_back(i);
}
// Shuffling
shuffling_index = shuffle(original_index_set, (unsigned)time(NULL));
// Setting
double step_size = _setting.step_size;
// Allocate the memory to model
model = new double[feature_num];
for(int i = 0; i < feature_num; i ++)
{
model[i] = 0.00;
}
// Loss Function
double F = 0.00;
double r_curr = 0.00;
double r_prev = 0.00;
int iters = 0;
std::cout << "Start training" << std::endl;
do
{
r_prev = F;
F = 0.00;
vector<double> gradient(feature_num,0.00);
for(long j = 0; j < data_size; j ++)
{
long cur_index = shuffling_index[j];
// Update the model
double output = 0.00;
for(int k = 0; k < feature_num; k ++)
{
output += model[k]*data[cur_index][k+2];
}
for(int k = 0; k < feature_num; k ++)
{
gradient[k] = gradientCompute(data[cur_index][1],output, "lr")*data[cur_index][2+k];
model[k] = model[k]-step_size*gradient[k];
}
}
// Calculate F
for(long j = 0; j < data_size; j ++)
{
double output = 0.00;
for(int k = 0; k < feature_num; k ++)
{
output += model[k]*data[j][k+2];
}
double tmp = lossCompute(data[j][1], output, "lr");
F += tmp;
}
r_curr = F;
std::cout << "Loss: " << F << std::endl;
iters ++;
}
while(!stop(iters ,r_prev,r_curr,_setting));
printf("\n");
outputResults(r_curr, feature_num, iters, model);
DataManagement::message("Finish SGD");
}
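// lossCompute() and gradientCompute() are defined elsewhere in the code base. For
// reference only, a common logistic-regression ("lr") formulation for +1/-1 labels,
// consistent with how classify() below thresholds the sigmoid at 0.5, is sketched here.
// Both helpers are assumptions for illustration (they are not the project's API) and
// assume <cmath>-style log/exp are available.
static double lr_loss_sketch(double y, double wx)
{
return log(1.0 + exp(-y * wx)); // log-loss on the margin y * (w^T x)
}
static double lr_gradient_sketch(double y, double wx)
{
return -y / (1.0 + exp(y * wx)); // derivative of the loss above w.r.t. wx
}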
#pragma mark - Batch Gradient Descent
// Batch Gradient Descent
void techniques::BGD(vector< vector<double> > data, setting _setting, double *&model, int feature_num)
{
DataManagement::message("Start BGD");
long data_size = data.size();
// Setting
double step_size = _setting.step_size;
// Allocate the memory to the model
model = new double[feature_num];
for(int i = 0; i < feature_num; i ++)
{
model[i] = 0.00;
}
// Loss Function
double F = 0.00;
double r_curr = 0.00;
double r_prev = 0.00;
int iters = 0;
do
{
r_prev = F;
F = 0.00;
vector<double> gradient(feature_num,0.00);
for(long j = 0; j < data_size; j ++)
{
// Update the model
double output = 0.00;
for(int k = 0; k < feature_num; k ++)
{
output += model[k]*data[j][2+k];
}
for(int k = 0; k < feature_num; k ++)
{
gradient[k] += gradientCompute(data[j][1],output, "lm")*data[j][2+k];
}
}
for(int k = 0; k < feature_num; k ++)
{
model[k] = model[k]-step_size*gradient[k];
}
for(long j = 0; j < data_size; j ++)
{
double output = 0.00;
for(int k = 0; k < feature_num; k ++)
{
output += model[k]*data[j][2+k];
}
double tmp = lossCompute(data[j][1], output, "lm");
printf("tmp loss: %f\n", tmp);
F += tmp;
}
r_curr = F;
printf("The loss: %lf\n",F);
iters ++;
}
while(!stop(iters ,r_prev,r_curr,_setting));
printf("\n");
outputResults(r_curr, feature_num, iters, model);
DataManagement::message("Finish BGD");
}
void techniques::classify(vector< vector<double> > data, vector<double> model)
{
// Count the number of correct classifications
long count = 0;
long data_size = data.size();
if(data.at(0).size() != model.size()+2)
{
DataManagement::errorMessage("Inconsistent file provided");
}
int featureNum = (int)model.size();
for(long i = 0; i < data_size; i ++)
{
double actual_label = data[i][1];
double predicted_label = 0.00;
double confidence = 0.00;
double output = 0.00;
for(int j = 0; j < featureNum; j ++)
{
output += model[j]*data[i][2+j];
}
printf("W^TX: %f\n", output);
confidence = C_lr(output);
if(confidence > 0.5)
{
predicted_label = 1.00;
}
else
{
predicted_label = -1.00;
}
if(actual_label == predicted_label)
{
printf("Prediction Correct\n");
count++;
}
else
{
printf("Prediction Wrong\n");
}
printf("Confidence: %f\n", confidence);
printf("Actual Label: %f , Predicted Label: %f\n", actual_label, predicted_label);
}
printf("Correctness: %f \n", (double)count/(double)data_size);
}
#pragma mark - shuffling
vector<int> techniques::shuffle(vector<int> &index_set, unsigned seed)
{
vector<int> original_set = index_set;
int size = (int)index_set.size();
vector<int> new_index_set;
srandom(seed); // seed random(), which is used below (srand() would seed rand() instead)
for(int i = 0; i < size; i ++)
{
int cur_size = (int)original_set.size();
int rand_index = random()%cur_size;
new_index_set.push_back(original_set.at(rand_index));
original_set.erase(original_set.begin()+rand_index);
}
return new_index_set;
}
vector<long> techniques::shuffle(vector<long> &index_set, unsigned seed)
{
vector<long> original_set = index_set;
long size = (long)index_set.size();
vector<long> new_index_set;
srandom(seed); // match the generator (random()) used below
for(long i = 0; i < size; i ++)
{
long cur_size = original_set.size();
long rand_index = random()%cur_size;
new_index_set.push_back(original_set.at(rand_index));
original_set.erase(original_set.begin()+rand_index);
}
return new_index_set;
}
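// The two shuffle() overloads above sample without replacement by erasing from a copy,
// which costs O(n^2) in the number of indexes. An equivalent O(n) in-place Fisher-Yates
// pass would look like the sketch below (hypothetical helper, not wired into the code):
static void fisherYatesShuffle(vector<int> &v, unsigned seed)
{
srandom(seed);
for(int i = (int)v.size() - 1; i > 0; i --)
{
int j = (int)(random() % (i + 1));
int tmp = v[i];
v[i] = v[j];
v[j] = tmp;
}
}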
#pragma mark - stop criteria
bool techniques::stop(int k, double r_prev, double r_curr, setting &setting)
{
double iter_num = k;
double difference = fabs(r_prev - r_curr); // fabs: plain abs() would truncate to int
if( iter_num == setting.iter_num || difference <= setting.error)
{
return true;
}
else
{
return false;
}
}
#pragma mark - print the final result
void techniques::outputResults(double r_curr, int feature_num, int k, double *&model)
{
printf("The final loss: %lf\n", r_curr);
printf("Number of iteration: %d\n", k);
printf("Model: ");
for(int i = 0; i < feature_num; i ++)
{
if(i == feature_num - 1)
{
printf("%.20f\n",model[i]);
}
else
{
printf("%.20f, ",model[i]);
}
}
}
|
8bb2a4e5dcf1503aab35a0db44a8679dbf873285.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <vector>
#include <cfloat>
#include "caffe/layer.hpp"
//#include "caffe/vision_layers.hpp"
#include "caffe/common_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, const Dtype* bottom_data_b,
const Dtype* batch_coeff, const int data_blob_idx, const int num_batches,
const int inner_size, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int batch_idx = index/inner_size;
// do nothing if current batch_coeff is 0
if (batch_coeff != NULL && batch_coeff[data_blob_idx*num_batches + batch_idx] == Dtype(0))
return;
if (bottom_data_a[index] < bottom_data_b[index]) {
top_data[index] = bottom_data_b[index];
}
}
}
template <typename Dtype>
__global__ void SumForward(const int nthreads, const Dtype* bottom_data,
const Dtype* batch_coeff, const int data_blob_idx, const int num_batches,
const int inner_size, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int batch_idx = index/inner_size;
// do nothing if current batch_coeff is 0
top_data[index] += bottom_data[index] *
(batch_coeff != NULL ? batch_coeff[data_blob_idx*num_batches + batch_idx]: Dtype(1.0));
}
}
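// Indexing note for the kernels above: each thread handles one element of a flattened
// (batch x inner) blob, so batch_idx = index / inner_size selects the batch item, and
// batch_coeff (when given via bottom[0]) is read as a [num data blobs x num_batches]
// matrix, i.e. entry [data_blob_idx * num_batches + batch_idx] weights that blob's
// contribution for that batch item.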
template <typename Dtype>
void MergeBatchLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* top_data = top[0]->mutable_gpu_data();
int start_data_blob = ignore_blob_ ? 1 : 0;
int inner_size = bottom[start_data_blob]->count(1);
int count = top[0]->count();
// set batch_coeff from bottom[0] if given
const Dtype* batch_coeff = NULL;
if (ignore_blob_){
batch_coeff = bottom[0]->gpu_data();
}
switch (op_) {
case MergeBatchParameter_MergeOp_SUM:
// Initialize
caffe_gpu_set(count, Dtype(0.0), top_data);
for (int i = start_data_blob; i < bottom.size(); ++i) {
const Dtype* bottom_data_i = bottom[i]->gpu_data();
// need to subtract ignore_blob
hipLaunchKernelGGL(( SumForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data_i, batch_coeff, i-start_data_blob, num_batches_, inner_size, top_data);
}
break;
case MergeBatchParameter_MergeOp_MAX:
// Initialize
caffe_gpu_set(count, Dtype(-FLT_MAX), top_data);
for (int i = start_data_blob; i < bottom.size(); ++i) {
const Dtype* bottom_data_i = bottom[i]->gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( MaxForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_data, bottom_data_i, batch_coeff, i-start_data_blob, num_batches_, inner_size, top_data);
}
break;
default:
LOG(FATAL) << "Unknown merge operation.";
}
}
template <typename Dtype>
__global__ void MaxBackward(const int nthreads, const Dtype* top_diff, const Dtype* top_data,
const Dtype* bottom_data, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// equivalent to MIL, put diff to the bottom blob that is equal to top_data
Dtype top_val = top_data[index];
if (top_val == bottom_data[index] && top_val != -FLT_MAX)
bottom_diff[index] = top_diff[index];
}
}
template <typename Dtype>
__global__ void SumBackward( const int nthreads, const Dtype* top_diff, const Dtype* batch_coeff,
const int data_blob_idx, const int num_batches, const int inner_size, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int batch_idx = index/inner_size;
// bottom diff is scaled top diff
bottom_diff[index] = top_diff[index] *
(batch_coeff!=NULL?batch_coeff[data_blob_idx*num_batches + batch_idx]:Dtype(1.0));
}
}
template <typename Dtype>
void MergeBatchLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
int start_data_blob = ignore_blob_ ? 1 : 0;
const Dtype* top_diff=top[0]->gpu_diff();
const Dtype* top_data=top[0]->gpu_data();
int inner_size = bottom[start_data_blob]->count(1);
int count = bottom[start_data_blob]->count();
// set batch_coeff from bottom[0] if given
const Dtype* batch_coeff = NULL;
if (ignore_blob_){
batch_coeff = bottom[0]->gpu_data();
}
for (int i=start_data_blob; i<bottom.size(); i++){
if(propagate_down[i]) {
Dtype* bottom_diff_i = bottom[i]->mutable_gpu_diff();
caffe_gpu_set(count, Dtype(0.0), bottom_diff_i);
const Dtype* bottom_data_i = bottom[i]->gpu_data();
switch (op_) {
case MergeBatchParameter_MergeOp_SUM:
// LOG(INFO) << "SUM";
hipLaunchKernelGGL(( SumBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, batch_coeff, i-start_data_blob, num_batches_, inner_size, bottom_diff_i);
break;
case MergeBatchParameter_MergeOp_MAX:
// LOG(INFO) << "MAX";
hipLaunchKernelGGL(( MaxBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, top_data, bottom_data_i, bottom_diff_i);
break;
case MergeBatchParameter_MergeOp_MEAN:
break;
default:
LOG(FATAL) << "Unknown merge operation.";
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(MergeBatchLayer);
} // namespace caffe
| 8bb2a4e5dcf1503aab35a0db44a8679dbf873285.cu | #include <vector>
#include <cfloat>
#include "caffe/layer.hpp"
//#include "caffe/vision_layers.hpp"
#include "caffe/common_layers.hpp"
#include "caffe/util/math_functions.hpp"
namespace caffe {
template <typename Dtype>
__global__ void MaxForward(const int nthreads, const Dtype* bottom_data_a, const Dtype* bottom_data_b,
const Dtype* batch_coeff, const int data_blob_idx, const int num_batches,
const int inner_size, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int batch_idx = index/inner_size;
// do nothing if current batch_coeff is 0
if (batch_coeff != NULL && batch_coeff[data_blob_idx*num_batches + batch_idx] == Dtype(0))
return;
if (bottom_data_a[index] < bottom_data_b[index]) {
top_data[index] = bottom_data_b[index];
}
}
}
template <typename Dtype>
__global__ void SumForward(const int nthreads, const Dtype* bottom_data,
const Dtype* batch_coeff, const int data_blob_idx, const int num_batches,
const int inner_size, Dtype* top_data) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int batch_idx = index/inner_size;
// do nothing if current batch_coeff is 0
top_data[index] += bottom_data[index] *
(batch_coeff != NULL ? batch_coeff[data_blob_idx*num_batches + batch_idx]: Dtype(1.0));
}
}
template <typename Dtype>
void MergeBatchLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
const vector<Blob<Dtype>*>& top) {
Dtype* top_data = top[0]->mutable_gpu_data();
int start_data_blob = ignore_blob_ ? 1 : 0;
int inner_size = bottom[start_data_blob]->count(1);
int count = top[0]->count();
// set batch_coeff from bottom[0] if given
const Dtype* batch_coeff = NULL;
if (ignore_blob_){
batch_coeff = bottom[0]->gpu_data();
}
switch (op_) {
case MergeBatchParameter_MergeOp_SUM:
// Initialize
caffe_gpu_set(count, Dtype(0.0), top_data);
for (int i = start_data_blob; i < bottom.size(); ++i) {
const Dtype* bottom_data_i = bottom[i]->gpu_data();
// need to subtract ignore_blob
SumForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data_i, batch_coeff, i-start_data_blob, num_batches_, inner_size, top_data);
}
break;
case MergeBatchParameter_MergeOp_MAX:
// Initialize
caffe_gpu_set(count, Dtype(-FLT_MAX), top_data);
for (int i = start_data_blob; i < bottom.size(); ++i) {
const Dtype* bottom_data_i = bottom[i]->gpu_data();
// NOLINT_NEXT_LINE(whitespace/operators)
MaxForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_data, bottom_data_i, batch_coeff, i-start_data_blob, num_batches_, inner_size, top_data);
}
break;
default:
LOG(FATAL) << "Unknown merge operation.";
}
}
template <typename Dtype>
__global__ void MaxBackward(const int nthreads, const Dtype* top_diff, const Dtype* top_data,
const Dtype* bottom_data, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
// equivalent to MIL, put diff to the bottom blob that is equal to top_data
Dtype top_val = top_data[index];
if (top_val == bottom_data[index] && top_val != -FLT_MAX)
bottom_diff[index] = top_diff[index];
}
}
template <typename Dtype>
__global__ void SumBackward( const int nthreads, const Dtype* top_diff, const Dtype* batch_coeff,
const int data_blob_idx, const int num_batches, const int inner_size, Dtype* bottom_diff) {
CUDA_KERNEL_LOOP(index, nthreads) {
const int batch_idx = index/inner_size;
// bottom diff is scaled top diff
bottom_diff[index] = top_diff[index] *
(batch_coeff!=NULL?batch_coeff[data_blob_idx*num_batches + batch_idx]:Dtype(1.0));
}
}
template <typename Dtype>
void MergeBatchLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) {
int start_data_blob = ignore_blob_ ? 1 : 0;
const Dtype* top_diff=top[0]->gpu_diff();
const Dtype* top_data=top[0]->gpu_data();
int inner_size = bottom[start_data_blob]->count(1);
int count = bottom[start_data_blob]->count();
// set batch_coeff from bottom[0] if given
const Dtype* batch_coeff = NULL;
if (ignore_blob_){
batch_coeff = bottom[0]->gpu_data();
}
for (int i=start_data_blob; i<bottom.size(); i++){
if(propagate_down[i]) {
Dtype* bottom_diff_i = bottom[i]->mutable_gpu_diff();
caffe_gpu_set(count, Dtype(0.0), bottom_diff_i);
const Dtype* bottom_data_i = bottom[i]->gpu_data();
switch (op_) {
case MergeBatchParameter_MergeOp_SUM:
// LOG(INFO) << "SUM";
SumBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, batch_coeff, i-start_data_blob, num_batches_, inner_size, bottom_diff_i);
break;
case MergeBatchParameter_MergeOp_MAX:
// LOG(INFO) << "MAX";
MaxBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, top_data, bottom_data_i, bottom_diff_i);
break;
case MergeBatchParameter_MergeOp_MEAN:
break;
default:
LOG(FATAL) << "Unknown merge operation.";
}
}
}
}
INSTANTIATE_LAYER_GPU_FUNCS(MergeBatchLayer);
} // namespace caffe
|
64815c6a27abade1279e480f80032b5b0d4ddb6b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <limits>
#include <vector>
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (Caffe::phase() == Caffe::TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DropoutForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
}
}
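// For reference: the members used above are normally initialized on the CPU side of the
// layer, roughly as sketched here (illustrative only, not the exact upstream code):
// scale_ = 1. / (1. - threshold_); // threshold_ = dropout ratio
// uint_thres_ = static_cast<unsigned int>(UINT_MAX * threshold_);
// so "mask[index] > uint_thres_" keeps a unit with probability (1 - threshold_) and
// scale_ compensates for the dropped units at training time (inverted dropout).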
template <typename Dtype>
__global__ void DropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
vector<Blob<Dtype>*>* bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
if (Caffe::phase() == Caffe::TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
const int count = (*bottom)[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DropoutBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Backward2_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
vector<Blob<Dtype>*>* bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
if (Caffe::phase() == Caffe::TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
const int count = (*bottom)[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
hipLaunchKernelGGL(( DropoutBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)),
dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_CLASS(DropoutLayer);
} // namespace caffe
| 64815c6a27abade1279e480f80032b5b0d4ddb6b.cu | #include <algorithm>
#include <limits>
#include <vector>
#include "caffe/common.hpp"
#include "caffe/layer.hpp"
#include "caffe/syncedmem.hpp"
#include "caffe/util/math_functions.hpp"
#include "caffe/vision_layers.hpp"
namespace caffe {
template <typename Dtype>
__global__ void DropoutForward(const int n, const Dtype* in,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out) {
CUDA_KERNEL_LOOP(index, n) {
out[index] = in[index] * (mask[index] > threshold) * scale;
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
vector<Blob<Dtype>*>* top) {
const Dtype* bottom_data = bottom[0]->gpu_data();
Dtype* top_data = (*top)[0]->mutable_gpu_data();
const int count = bottom[0]->count();
if (Caffe::phase() == Caffe::TRAIN) {
unsigned int* mask =
static_cast<unsigned int*>(rand_vec_.mutable_gpu_data());
caffe_gpu_rng_uniform(count, mask);
// set thresholds
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
count, bottom_data, mask, uint_thres_, scale_, top_data);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(count, bottom_data, top_data);
}
}
template <typename Dtype>
__global__ void DropoutBackward(const int n, const Dtype* in_diff,
const unsigned int* mask, const unsigned int threshold, const float scale,
Dtype* out_diff) {
CUDA_KERNEL_LOOP(index, n) {
out_diff[index] = in_diff[index] * scale * (mask[index] > threshold);
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
vector<Blob<Dtype>*>* bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
if (Caffe::phase() == Caffe::TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
const int count = (*bottom)[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
template <typename Dtype>
void DropoutLayer<Dtype>::Backward2_gpu(const vector<Blob<Dtype>*>& top,
const vector<bool>& propagate_down,
vector<Blob<Dtype>*>* bottom) {
if (propagate_down[0]) {
const Dtype* top_diff = top[0]->gpu_diff();
Dtype* bottom_diff = (*bottom)[0]->mutable_gpu_diff();
if (Caffe::phase() == Caffe::TRAIN) {
const unsigned int* mask =
static_cast<const unsigned int*>(rand_vec_.gpu_data());
const int count = (*bottom)[0]->count();
// NOLINT_NEXT_LINE(whitespace/operators)
DropoutBackward<Dtype><<<CAFFE_GET_BLOCKS(count),
CAFFE_CUDA_NUM_THREADS>>>(
count, top_diff, mask, uint_thres_, scale_, bottom_diff);
CUDA_POST_KERNEL_CHECK;
} else {
caffe_copy(top[0]->count(), top_diff, bottom_diff);
}
}
}
INSTANTIATE_CLASS(DropoutLayer);
} // namespace caffe
|
8aef60e6e9673d4a27e1291172a19b7e3355dfb5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <fstream>
#include <sstream>
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <cassert>
using namespace std;
typedef long long int lli;
void usage()
{
cout << "USAGE: ./exec <filename> <device>" << endl;
exit(0);
}
inline hipError_t checkCuda(hipError_t result) {
if (result != hipSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", hipGetErrorString(result));
assert(result == hipSuccess);
}
return result;
}
__global__
void PreProcess(double* xCuda, double* xx, lli TOTAL, lli xSIZE)
{
lli ind = (blockDim.x * blockIdx.x + threadIdx.x);
double* my_xCuda = xCuda + ind;
lli index = 0;
for(lli j = 0; j < xSIZE; j+=TOTAL)
{
my_xCuda[j] = xx[index++];
}
}
__global__
void PermanentCalculator(lli K, float* xCuda, float* pCuda, int* MCuda, lli CHUNK, lli N, lli xSIZE, lli TOTAL, lli Nsqr, lli BLOCK)
{
lli index = (blockDim.x * blockIdx.x + threadIdx.x) * CHUNK;
if(index < K)
{
lli ind = threadIdx.x;
lli LIM = BLOCK * N;
extern __shared__ float M[];
float* temp_xCuda = M;
float* my_xCuda = temp_xCuda + ind;
lli new_ind = 0;
for(lli i = 0; i < LIM; i+=BLOCK)
my_xCuda[i] = xCuda[new_ind++];
__syncthreads();
lli START = index + 1;
lli END = index + CHUNK + 1;
lli yy = index ^ (index >> 1LL);
lli y, y_prev, FFS, z;
double s, prodSign;
float pSelf = 0.0;
lli temp_y = yy;
lli BPC = __popcll(yy);
for(lli n = 0; n < BPC; n++)
{
FFS = __ffsll(temp_y) - 1;
temp_y &= ~(1LL << FFS);
new_ind = 0;
for(lli m = 0; m < LIM; m += BLOCK)
{
my_xCuda[m] += MCuda[(new_ind++) + (FFS*N)];
__syncthreads();
}
}
prodSign = ((index + 1) & 1LL) ? -1.0 : 1.0;
for(lli ii = START; (ii < END) && (ii < K); ii++)
{
y = (ii ^ (ii >> 1LL));
y_prev = (ii - 1) ^ ((ii - 1) >> 1LL);
z = __ffsll(y ^ y_prev) - 1;
s = ((y >> z) & 1LL) ? 1.0 : -1.0;
new_ind = 0;
float temp = 1.0;
//#pragma unroll
for(lli jj = 0; jj < LIM; jj += BLOCK)
{
my_xCuda[jj] += (s * MCuda[(new_ind++) + (z * N)]);
temp *= my_xCuda[jj];
__syncthreads();
}
pSelf += (prodSign * temp);
prodSign *= -1.0;
}
atomicAdd(pCuda, pSelf);
}
}
int main(int argc, const char** argv)
{
if(argc != 3)
usage();
string line;
const char* filename = argv[1];
ifstream input (filename);
if(input.fail())
return 0;
int cudaDevice = atoi(argv[2]);
hipSetDevice(cudaDevice);
hipDeviceProp_t prop;
hipGetDeviceProperties(&prop, cudaDevice);
lli N;
lli Nsqr;
int **M;
int *Mrow;
int *MCuda;
getline(input,line);
N = atoi(line.c_str());
Nsqr = N*N;
checkCuda(hipMalloc((void**)&MCuda, sizeof(int)*Nsqr));
Mrow = (int*)(malloc(sizeof(int)*Nsqr));
M = (int**)(malloc(sizeof(int*)*N));
for(int i = 0; i < N; i ++)
{
M[i] = (int*)(malloc(sizeof(int)*N));
}
int linectr = 0;
while(getline(input,line))
{
stringstream ss(line);
int temp;
int ctr = 0;
while(ss >> temp)
{
M[linectr][ctr++] = temp;
}
linectr++;
}
int trctr = 0;
for(int i = 0; i < N; i++)
for(int j = 0; j < N; j++)
Mrow[trctr++] = M[j][i];
hipMemcpy(MCuda, Mrow, sizeof(int)*Nsqr, hipMemcpyHostToDevice);
double start, end, initt;
////YOUR CODE GOES HERE
lli K;
K = 1LL << (N-1);
float result = 0.0;
float *p = (float*)(malloc(sizeof(float)));
*p = 1.0;
float *x = (float*)(malloc(sizeof(float)*N));
float *sumCol = (float*)(malloc(sizeof(float)*N));
float *lastCol = (float*)(malloc(sizeof(float)*N));
memset(sumCol, 0, sizeof(float)*N);
memset(lastCol, 0, sizeof(float)*N);
//lli GRID = prop.maxThreadsDim[0];
lli GRID = 1024*64;
lli BLOCK = 256;
lli TOTAL = GRID * BLOCK;
lli CHUNK = (K + (TOTAL-1)) / TOTAL;
lli xSIZE = TOTAL * N;
size_t SHARED = (BLOCK * N * sizeof(float));
float *pCuda;
float *xx;
hipMalloc((void**)&pCuda, sizeof(float));
hipMalloc((void**)&xx, sizeof(float)*N);
//printf("Device Variable Copying:\t%s\n", hipGetErrorString(hipGetLastError()));
//BEGINNING
initt = omp_get_wtime();
for(int i = 0; i< N; i++)
{
lastCol[i] = M[i][N-1];
for(int j = 0; j < N; j++)
sumCol[i] += M[i][j];
x[i] = (lastCol[i] - sumCol[i]/2);
(*p) *= x[i];
}
hipMemcpy(xx, x, sizeof(float)*N, hipMemcpyHostToDevice);
hipMemcpy(pCuda, p, sizeof(float), hipMemcpyHostToDevice);
start = omp_get_wtime();
hipLaunchKernelGGL(( PermanentCalculator), dim3(GRID), dim3(BLOCK), SHARED, 0, K, xx, pCuda, MCuda, CHUNK, N, xSIZE, TOTAL, Nsqr, BLOCK);
hipDeviceSynchronize();
hipMemcpy(p, pCuda, sizeof(float), hipMemcpyDeviceToHost);
result = (4 * (N & 1) - 2) * (*p);
//ENDING
end = omp_get_wtime();
cout << "Threads:" << TOTAL << "\tResult:" << result << "\tTime:" << end - start << "s" << "\tTotal Time:"<< end - initt << endl;
//cout << TOTAL << "," << result << "," << end - start << "\n";
for(int i = 0; i < N; i++)
free(M[i]);
free(p);
free(x);
free(sumCol);
free(lastCol);
hipFree(xx);
hipFree(pCuda);
hipFree(MCuda);
free(M);
free(Mrow);
return 0;
}
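// A serial reference for the Gray-code (Nijenhuis-Wilf style) permanent computed by the
// kernel above, useful for checking the GPU result on small N. It mirrors the same
// recurrences: x starts at M[i][N-1] - rowSum/2, each Gray-code step adds or removes one
// column, and the final factor (4*(N&1)-2) = +/-2 restores the overall sign and scale.
// Hypothetical helper: it is not called from main().
double permanentReference(int **M, lli N)
{
double *x = (double*)malloc(sizeof(double)*N);
double p = 1.0;
for(lli i = 0; i < N; i++)
{
double rowSum = 0.0;
for(lli j = 0; j < N; j++) rowSum += M[i][j];
x[i] = M[i][N-1] - rowSum/2.0;
p *= x[i];
}
lli K = 1LL << (N-1);
for(lli ii = 1; ii < K; ii++)
{
lli y = ii ^ (ii >> 1);
lli y_prev = (ii-1) ^ ((ii-1) >> 1);
lli diff = y ^ y_prev;
lli z = 0;
while(!((diff >> z) & 1LL)) z++; // position of the single bit that changed
double s = ((y >> z) & 1LL) ? 1.0 : -1.0;
double prod = 1.0;
for(lli i = 0; i < N; i++)
{
x[i] += s * M[i][z];
prod *= x[i];
}
p += ((ii & 1LL) ? -1.0 : 1.0) * prod;
}
free(x);
return (4.0 * (N & 1) - 2.0) * p;
}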
| 8aef60e6e9673d4a27e1291172a19b7e3355dfb5.cu | #include <iostream>
#include <fstream>
#include <sstream>
#include <omp.h>
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <stdint.h>
#include <cassert>
using namespace std;
typedef long long int lli;
void usage()
{
cout << "USAGE: ./exec <filename> <device>" << endl;
exit(0);
}
inline cudaError_t checkCuda(cudaError_t result) {
if (result != cudaSuccess) {
fprintf(stderr, "CUDA Runtime Error: %s\n", cudaGetErrorString(result));
assert(result == cudaSuccess);
}
return result;
}
__global__
void PreProcess(double* xCuda, double* xx, lli TOTAL, lli xSIZE)
{
lli ind = (blockDim.x * blockIdx.x + threadIdx.x);
double* my_xCuda = xCuda + ind;
lli index = 0;
for(lli j = 0; j < xSIZE; j+=TOTAL)
{
my_xCuda[j] = xx[index++];
}
}
__global__
void PermanentCalculator(lli K, float* xCuda, float* pCuda, int* MCuda, lli CHUNK, lli N, lli xSIZE, lli TOTAL, lli Nsqr, lli BLOCK)
{
lli index = (blockDim.x * blockIdx.x + threadIdx.x) * CHUNK;
if(index < K)
{
lli ind = threadIdx.x;
lli LIM = BLOCK * N;
extern __shared__ float M[];
float* temp_xCuda = M;
float* my_xCuda = temp_xCuda + ind;
lli new_ind = 0;
for(lli i = 0; i < LIM; i+=BLOCK)
my_xCuda[i] = xCuda[new_ind++];
__syncthreads();
lli START = index + 1;
lli END = index + CHUNK + 1;
lli yy = index ^ (index >> 1LL);
lli y, y_prev, FFS, z;
double s, prodSign;
float pSelf = 0.0;
lli temp_y = yy;
lli BPC = __popcll(yy);
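// Fast-forward x from the empty subset to the Gray code of this thread's starting index by
// applying each set bit (i.e. each selected matrix column) once.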
for(lli n = 0; n < BPC; n++)
{
FFS = __ffsll(temp_y) - 1;
temp_y &= ~(1LL << FFS);
new_ind = 0;
for(lli m = 0; m < LIM; m += BLOCK)
{
my_xCuda[m] += MCuda[(new_ind++) + (FFS*N)];
__syncthreads();
}
}
prodSign = ((index + 1) & 1LL) ? -1.0 : 1.0;
for(lli ii = START; (ii < END) && (ii < K); ii++)
{
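// Consecutive Gray codes differ in exactly one bit: z is the column whose membership flipped,
// and s is +1 if it was just added to the subset, -1 if it was removed.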
y = (ii ^ (ii >> 1LL));
y_prev = (ii - 1) ^ ((ii - 1) >> 1LL);
z = __ffsll(y ^ y_prev) - 1;
s = ((y >> z) & 1LL) ? 1.0 : -1.0;
new_ind = 0;
float temp = 1.0;
//#pragma unroll
for(lli jj = 0; jj < LIM; jj += BLOCK)
{
my_xCuda[jj] += (s * MCuda[(new_ind++) + (z * N)]);
temp *= my_xCuda[jj];
__syncthreads();
}
pSelf += (prodSign * temp);
prodSign *= -1.0;
}
atomicAdd(pCuda, pSelf);
}
}
int main(int argc, const char** argv)
{
if(argc != 3)
usage();
string line;
const char* filename = argv[1];
ifstream input (filename);
if(input.fail())
return 0;
int cudaDevice = atoi(argv[2]);
cudaSetDevice(cudaDevice);
cudaDeviceProp prop;
cudaGetDeviceProperties(&prop, cudaDevice);
lli N;
lli Nsqr;
int **M;
int *Mrow;
int *MCuda;
getline(input,line);
N = atoi(line.c_str());
Nsqr = N*N;
checkCuda(cudaMalloc((void**)&MCuda, sizeof(int)*Nsqr));
Mrow = (int*)(malloc(sizeof(int)*Nsqr));
M = (int**)(malloc(sizeof(int*)*N));
for(int i = 0; i < N; i ++)
{
M[i] = (int*)(malloc(sizeof(int)*N));
}
int linectr = 0;
while(getline(input,line))
{
stringstream ss(line);
int temp;
int ctr = 0;
while(ss >> temp)
{
M[linectr][ctr++] = temp;
}
linectr++;
}
int trctr = 0;
for(int i = 0; i < N; i++)
for(int j = 0; j < N; j++)
Mrow[trctr++] = M[j][i];
cudaMemcpy(MCuda, Mrow, sizeof(int)*Nsqr, cudaMemcpyHostToDevice);
double start, end, initt;
////YOUR CODE GOES HERE
lli K;
K = 1LL << (N-1);
float result = 0.0;
float *p = (float*)(malloc(sizeof(float)));
*p = 1.0;
float *x = (float*)(malloc(sizeof(float)*N));
float *sumCol = (float*)(malloc(sizeof(float)*N));
float *lastCol = (float*)(malloc(sizeof(float)*N));
memset(sumCol, 0, sizeof(float)*N);
memset(lastCol, 0, sizeof(float)*N);
//lli GRID = prop.maxThreadsDim[0];
lli GRID = 1024*64;
lli BLOCK = 256;
lli TOTAL = GRID * BLOCK;
lli CHUNK = (K + (TOTAL-1)) / TOTAL;
lli xSIZE = TOTAL * N;
size_t SHARED = (BLOCK * N * sizeof(float));
float *pCuda;
float *xx;
cudaMalloc((void**)&pCuda, sizeof(float));
cudaMalloc((void**)&xx, sizeof(float)*N);
//printf("Device Variable Copying:\t%s\n", cudaGetErrorString(cudaGetLastError()));
//BEGINNING
initt = omp_get_wtime();
for(int i = 0; i< N; i++)
{
lastCol[i] = M[i][N-1];
for(int j = 0; j < N; j++)
sumCol[i] += M[i][j];
x[i] = (lastCol[i] - sumCol[i]/2);
(*p) *= x[i];
}
cudaMemcpy(xx, x, sizeof(float)*N, cudaMemcpyHostToDevice);
cudaMemcpy(pCuda, p, sizeof(float), cudaMemcpyHostToDevice);
start = omp_get_wtime();
PermanentCalculator<<<GRID, BLOCK, SHARED>>>(K, xx, pCuda, MCuda, CHUNK, N, xSIZE, TOTAL, Nsqr, BLOCK);
cudaDeviceSynchronize();
cudaMemcpy(p, pCuda, sizeof(float), cudaMemcpyDeviceToHost);
result = (4 * (N & 1) - 2) * (*p);
//ENDING
end = omp_get_wtime();
cout << "Threads:" << TOTAL << "\tResult:" << result << "\tTime:" << end - start << "s" << "\tTotal Time:"<< end - initt << endl;
//cout << TOTAL << "," << result << "," << end - start << "\n";
for(int i = 0; i < N; i++)
free(M[i]);
free(p);
free(x);
free(sumCol);
free(lastCol);
cudaFree(xx);
cudaFree(pCuda);
cudaFree(MCuda);
free(M);
free(Mrow);
return 0;
}
|
5b9a32fcf8c4a6d1561d8ed5faead675fa4591b0.hip | // !!! This is a file automatically generated by hipify!!!
// #include "bunch.h"
#include "simParameters.cuh"
// random generator includes
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/xor_combine_engine.h>
#include <thrust/random.h>
#include <hiprand/hiprand_kernel.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/for_each.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
#include <thrust/transform_reduce.h>
#include <map>
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <math.h>
#include <boost/math/tools/roots.hpp>
#include <thrust/tuple.h>
#include <string>
#include "ste_global_functions.cu"
// #include "read_tfs.cu"
#include <vector>
#ifndef CUDA_PI_F
#define CUDA_PI_F 3.141592654f
#endif
#ifndef CUDA_C_F
#define CUDA_C_F 299792458.0f
#endif
using namespace std;
/* Calculate radiation damping times and equilibrium emittances if not given manually
 * Reference: Chao, Tigner: Handbook of Accelerator Physics and Engineering (1998), page 186
*
* uses radiationIntegrals struct -> see ste_global_functions.cu
*/
// __host__ __device__
// radiationIntegrals CalculateRadiationIntegralsApprox(radiationIntegralsParameters radiationIntParameters)
// {
// radiationIntegrals outputIntegralsApprox;
// // growth rates
// float alphax = 0.0;
// float alphay = 0.0;
// float gammax = (1.0 + pow(alphax,2)) / radiationIntParameters.betxRingAverage;
// float gammay = (1.0 + pow(alphay,2)) / radiationIntParameters.betyRingAverage;
// float Dx = radiationIntParameters.acceleratorLength / (2 * CUDA_PI_F * radiationIntParameters.gammaTransition);
// float Dy = 0.0;
// float Dxp = 0.1; // should find an approximation formula. However not very important
// float Dyp = 0.0;
// float Hx = (radiationIntParameters.betxRingAverage * Dxp + 2 * alphax * Dx * Dxp + gammax * Dx);
// float Hy = (radiationIntParameters.betyRingAverage * Dyp + 2 * alphay * Dy * Dyp + gammay * Dy);
// // define smooth approximation of radiation integrals
// outputIntegralsApprox.I2 = 2 * CUDA_PI_F / radiationIntParameters.DipoleBendingRadius;
// outputIntegralsApprox.I3 = 2 * CUDA_PI_F / pow(radiationIntParameters.DipoleBendingRadius,2);
// outputIntegralsApprox.I4x = 0.0;
// outputIntegralsApprox.I4y = 0.0;
// outputIntegralsApprox.I5x = Hx * 2 * CUDA_PI_F / pow(radiationIntParameters.DipoleBendingRadius,2);
// outputIntegralsApprox.I5y = Hy * 2 * CUDA_PI_F / pow(radiationIntParameters.DipoleBendingRadius,2);
// return outputIntegralsApprox;
// };
// struct CalculateRadiationIntegralsLatticeElement
// {
// __host__ __device__
// radiationIntegrals operator()(tfsTableData& tfsAcceleratorElement, radiationIntegralsParameters& radiationIntParameters) const
// {
// radiationIntegrals outputIntegralsLattice;
// float angle = tfsAcceleratorElement.angle;
// float l = tfsAcceleratorElement.l;
// float k1l = tfsAcceleratorElement.k1l;
// float dy = tfsAcceleratorElement.dy;
// float k1s = tfsAcceleratorElement.k1sl;
// float alphax = tfsAcceleratorElement.alfx;
// float alphay = tfsAcceleratorElement.alfy;
// float betx = tfsAcceleratorElement.betx;
// float bety = tfsAcceleratorElement.bety;
// float dx = tfsAcceleratorElement.dx;
// float dpx = tfsAcceleratorElement.dpx;
// float dpy = tfsAcceleratorElement.dpy;
// float rhoi = ( angle > 0.0) ? l /angle : 0.0;
// float ki = (l > 0.0) ? k1l / l : 0.0 ;
// outputIntegralsLattice.I2 = (rhoi > 0.0) ? l / pow(rhoi,2) : 0.0 ;
// outputIntegralsLattice.I3 = (rhoi > 0.0) ? l / pow(rhoi,3) : 0.0 ;
// // corrected to equations in accelerator handbook Chao second edition p 220
// outputIntegralsLattice.I4x = (rhoi > 0.0) ? ((dx / pow(rhoi,3)) + 2 * (ki * dx + (k1s / l) * dy) / rhoi) *l : 0.0 ;
// outputIntegralsLattice.I4y = 0.0;
// float gammax = (1.0 + pow(alphax,2)) / betx;
// float gammay = (1.0 + pow(alphay,2)) / bety;
// float Hx = betx * pow(dpx,2) + 2. * alphax * dx * dpx + gammax * pow(dx,2);
// float Hy = bety * pow(dpy,2) + 2. * alphay * dy * dpy + gammay * pow(dy,2);
// outputIntegralsLattice.I5x = Hx * 2 * CUDA_PI_F / pow(radiationIntParameters.DipoleBendingRadius,2) * l;
// outputIntegralsLattice.I5y = Hy * 2 * CUDA_PI_F / pow(radiationIntParameters.DipoleBendingRadius,2) * l;
// return outputIntegralsLattice;
// }
// };
// radiationIntegrals CalculateRadiationIntegralsLatticeRing(thrust::device_vector<tfsTableData> tfsData, radiationIntegralsParameters params)
// {
// int n = tfsData.size();
// thrust::device_vector<radiationIntegrals> radiationIntegralsPerElement(n);
// thrust::transform(tfsData.begin(),tfsData.end(),thrust::make_constant_iterator(params),radiationIntegralsPerElement.begin(),CalculateRadiationIntegralsLatticeElement());
// radiationIntegrals initsum;
// initsum.I2 = 0.0;
// initsum.I3 = 0.0;
// initsum.I4x = 0.0;
// initsum.I4y = 0.0;
// initsum.I5x = 0.0;
// initsum.I5y = 0.0;
// radiationIntegrals total = thrust::reduce(radiationIntegralsPerElement.begin(),radiationIntegralsPerElement.end(),initsum,addRadiationIntegralsElements());
// return total;
// }
// float6 CalculateRadiationDampingTimesAndEquilib(radiationIntegralsParameters params, radiationIntegrals integrals)
// {
// float6 result;
// // Chao handbook second edition page 221 eq. 11
// // float CalphaEC = params.ParticleRadius * CUDA_C_F / (3 * params.acceleratorLength);
// float CalphaEC = params.ParticleRadius * CUDA_C_F / (3 * pow(CUDA_ELECTRON_REST_E_F,3)) * (pow(params.p0,3)/params.acceleratorLength);
// // extra factor 2 to get growth rates for emittances and not amplitudes (sigmas)
// float alphax = 2.0f * CalphaEC * integrals.I2 * (1.0f - integrals.I4x / integrals.I2);
// float alphay = 2.0f * CalphaEC * integrals.I2 * (1.0f - integrals.I4y / integrals.I2);
// float alphas = 2.0f * CalphaEC * integrals.I2 * (2.0f + (integrals.I4x + integrals.I4y) / integrals.I2);
// // longitudinal equilibrium
// // Chao handbook second edition page 221 eq. 19
// float sigEoE02 = params.cq * pow(params.gammar,2) * integrals.I3 / (2 * integrals.I2 + integrals.I4x + integrals.I4y);
// float sigsEquilib = (CUDA_C_F * abs(params.eta) / params.omegas) * sqrt(sigEoE02);
// // Chao handbook second edition page 221 eq. 12
// float Jx = 1. - integrals.I4x / integrals.I2;
// float Jy = 1. - integrals.I4y / integrals.I2;
// // transverse equilibrium
// float EmitEquilibx = params.cq * pow(params.gammar,2) * integrals.I5x / (Jx * integrals.I2);
// float EmitEquiliby = params.cq * pow(params.gammar,2) * integrals.I5y / (Jy * integrals.I2);
// if (EmitEquiliby == 0.0)
// EmitEquiliby = params.cq * params.betyRingAverage * integrals.I3 / (2 * Jy * integrals.I2);
// result.x = 1.0 / alphax; // damping time returned in seconds
// result.px = 1.0 / alphay;
// result.y = 1.0 / alphas;
// result.py = EmitEquilibx;
// result.t = EmitEquiliby;
// result.delta = sigsEquilib/CUDA_C_F; // sigs returned in seconds
// return result;
// }
// need to calculate the particle radius separately!
struct RadiationDampingRoutineFunctor
{
RadiationDampingRoutineFunctor(float6 radiationDampParams, float trev, float timeratio, float seed) : radiationDampParams(radiationDampParams), trev(trev) ,timeratio(timeratio), seed(seed) {}
__host__ __device__ float6 operator()(float6 particle)
{
/*
* .x -> t_emitx
* .px -> t_emity
* .y -> t_sigs
* .py -> emit equilib x
* .t -> emit equilib y
* .delta -> sigs equilib
*/
float6 result;
unsigned int N = 1000;
thrust::default_random_engine rng;
rng.seed((int)seed);
rng.discard(N);
thrust::uniform_real_distribution<float> u01(0,1);
// timeratio is real machine turns over per simulation turn
float coeffdecaylong = 1 - (trev / radiationDampParams.y) * timeratio;
// excitation uses a uniform deviate on [-1:1]
float coeffexcitelong = radiationDampParams.delta * CUDA_C_F * sqrt(3.) * sqrt(1 - pow(coeffdecaylong,2));
// the damping time is for EMITTANCE, therefore need to multiply by 2
float coeffdecayx = 1 - ((trev /(2 * radiationDampParams.x)) * timeratio);
float coeffdecayy = 1 - ((trev /(2 * radiationDampParams.px)) * timeratio);
// exact coeffgrow= sigperp*sqrt(3.)*sqrt(1-coeffdecay**2)
// but trev << tradperp so
float coeffgrowx = radiationDampParams.py * sqrt(3.) * sqrt(1 - pow(coeffdecayx,2));
float coeffgrowy = radiationDampParams.t * sqrt(3.) * sqrt(1 - pow(coeffdecayy,2));
if ((radiationDampParams.x < 0.0) || (radiationDampParams.px) < 0.0)
return particle;
else
{
result.x = coeffdecayx * particle.x + coeffgrowx * (2*u01(rng)-1);
result.px = coeffdecayx * particle.px + coeffgrowx * (2*u01(rng)-1);
result.y = coeffdecayy * particle.y + coeffgrowy * (2*u01(rng)-1);
result.py = coeffdecayy * particle.py + coeffgrowy * (2*u01(rng)-1);
result.t = particle.t;
result.delta = coeffdecaylong * particle.delta + coeffexcitelong * (2*u01(rng)-1);
return result;
}
}
private:
float trev,seed;
float6 radiationDampParams;
float timeratio;
};
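// Hypothetical usage sketch (names assumed, not part of the original code): the functor is meant
// to be applied once per simulation turn to a bunch stored as a device vector of float6, e.g.
//   thrust::device_vector<float6> bunch(numParticles);
//   thrust::transform(bunch.begin(), bunch.end(), bunch.begin(),
//                     RadiationDampingRoutineFunctor(dampParams, trev, timeratio, seed));
// where dampParams would be the float6 returned by CalculateRadiationDampingTimesAndEquilib.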
int main(int argc, char const *argv[])
{
double acceleratorLength = 240.00839; // length in meter
double gammar = 3326.817037; // relativistic gamma
double eta = 0.0007038773471 - 1 / pow(gammar ,2); // slip factor approx alpha - 1/ gammar**2
double betar = sqrt(1-1 / pow(gammar ,2)); // relativistic beta
double trev = acceleratorLength / (betar * CUDA_C_F);
double omega0 = (2 * CUDA_PI_F) / trev;
double p0 = 1.7e9;
radiationIntegrals radtest;
radiationIntegralsParameters intpars;
intpars.betxRingAverage = 10.0;
intpars.betyRingAverage = 20.0;
intpars.acceleratorLength = 240.00839;
intpars.gammaTransition = 1/ sqrt(0.0007038773471);
intpars.DipoleBendingRadius = 40.0;
intpars.ParticleRadius = CUDA_C_R_ELECTRON;
intpars.ParticleEnergy = 1.7e9;
// Chao handbook second edition page 221 eq. 20
intpars.cq = (55.0/(32.0 * sqrt(3))) * (CUDA_HBAR_F * CUDA_C_F) / (CUDA_ELECTRON_REST_E_F);
intpars.gammar = 3326.817037;
intpars.omegas = 0.0565621 * omega0;
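// presumably the synchrotron angular frequency: synchrotron tune Q_s (~0.0566) times the revolution frequency omega0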
intpars.eta = eta;
intpars.p0 = p0;
radtest = CalculateRadiationIntegralsApprox(intpars);
cout << radtest;
vector<vector<string> > out;
vector<vector<float> > fout;
string in = "/home/tmerten/mad/2017-12-21/twiss/Long-corrected-LongPMMM-2017-12-21.tfs";
out = ReadTfsTable(in,false);
map<string, int> maptest = mapOfColumns(out[0],false);
fout = TfsTableConvertStringToFloat(out,maptest);
thrust::device_vector<tfsTableData> testdata;
testdata = TfsTableToDevice(fout);
std::copy(testdata.begin(),testdata.begin()+10,std::ostream_iterator<tfsTableData>(std::cout, "\n"));
radiationIntegrals total = CalculateRadiationIntegralsLatticeRing(testdata, intpars);
cout << total ;
cout << "Radiation damping times and equilibrium" << endl;
cout << intpars.cq<< endl;
cout << intpars.ParticleRadius * CUDA_C_F / (3 * intpars.acceleratorLength) << endl;
cout << intpars.ParticleRadius * CUDA_C_F / (3 * pow(CUDA_ELECTRON_REST_E_F,3)) << endl;
cout << intpars.ParticleRadius * CUDA_C_F / (3 * pow(CUDA_ELECTRON_REST_E_F,3)) * (pow(p0,3)/intpars.acceleratorLength) << endl;
float6 timesequilib = CalculateRadiationDampingTimesAndEquilib(intpars, total);
cout << timesequilib<< endl;
intpars.omegas = 0.00784862 * omega0;
timesequilib = CalculateRadiationDampingTimesAndEquilib(intpars, total);
cout << timesequilib<< endl;
return 0;
}
// does radiation damping and quantum excitation once per turn
// implicit none
// integer, intent(inout) :: np
// !f2py intent(in,out) :: np
// double precision, intent(inout):: iseed
// !f2py intent(in,out) :: iseed
// integer ::k
// double precision, intent(in) :: tratio,trev,tradlong,tradperp,siglong,sigperp
// !f2py intent(in) :: tratio,trev,tradlong,siglong,sigperp
// double precision :: coeffdecaylong,coeffexcitelong,coeffgrow,coeffdecay
// double precision, intent(inout), dimension(np) ::x,px,y,py,pt
// !f2py intent(in,out) :: x,px,y,py,pt
// double precision,external :: ran
// integer, dimension(:), allocatable :: seed
// integer :: n
// ! init random generator
// call random_seed(size=n)
// allocate(seed(n))
// seed(1) = INT(iseed)
// call random_seed(put=seed)
// coeffdecaylong = 1 - ((trev / tradlong) * tratio)
// ! excitation uses a uniform deviate on [-1:1]
// coeffexcitelong = siglong * sqrt(3.) * sqrt(2 * (trev / tradlong) * tratio)
// ! tradperp is the damping time for EMITTANCE, therefore need to multiply by 2
// ! assume same damping in horizontal and vertical plane (I4x,I4y<<I2)
// coeffdecay = 1 - ((trev /(2 * tradperp)) * tratio)
// ! exact coeffgrow= sigperp*sqrt(3.)*sqrt(1-coeffdecay**2)
// ! but trev << tradperp so
// coeffgrow = sigperp * sqrt(3.) * sqrt(2 * (trev /(2 * tradperp)) * tratio)
// ! skip if transverse damping time is not positive
// if(tradperp.le.0)return
// do k=1,np
// ! longitudinal
// call random_number(iseed)
// pt(k) = pt(k)*coeffdecaylong +coeffexcitelong*(2*iseed-1)
// ! transverse
// x(k) = coeffdecay*x(k) + (2*iseed-1)*coeffgrow
// px(k) = coeffdecay*px(k)+(2*iseed-1)*coeffgrow
// y(k) = coeffdecay*y(k) + (2*iseed-1)*coeffgrow
// py(k) = coeffdecay*py(k)+(2*iseed-1)*coeffgrow
// enddo
// return
// end
| 5b9a32fcf8c4a6d1561d8ed5faead675fa4591b0.cu | // #include "bunch.h"
#include "simParameters.cuh"
// random generator includes
#include <thrust/random/linear_congruential_engine.h>
#include <thrust/random/xor_combine_engine.h>
#include <thrust/random.h>
#include <curand_kernel.h>
#include <thrust/iterator/counting_iterator.h>
#include <thrust/for_each.h>
#include <thrust/host_vector.h>
#include <thrust/device_vector.h>
#include <thrust/transform.h>
#include <thrust/sequence.h>
#include <thrust/copy.h>
#include <thrust/fill.h>
#include <thrust/replace.h>
#include <thrust/functional.h>
#include <thrust/execution_policy.h>
#include <thrust/device_ptr.h>
#include <thrust/transform_reduce.h>
#include <map>
#include <iostream>
#include <cstdlib>
#include <cmath>
#include <math.h>
#include <boost/math/tools/roots.hpp>
#include <thrust/tuple.h>
#include <string>
#include "ste_global_functions.cu"
// #include "read_tfs.cu"
#include <vector>
#ifndef CUDA_PI_F
#define CUDA_PI_F 3.141592654f
#endif
#ifndef CUDA_C_F
#define CUDA_C_F 299792458.0f
#endif
using namespace std;
/* Calculate radiation damping times and equilibrium emittances if not given manually
 * Reference: Chao, Tigner: Handbook of Accelerator Physics and Engineering (1998), page 186
*
* uses radiationIntegrals struct -> see ste_global_functions.cu
*/
// __host__ __device__
// radiationIntegrals CalculateRadiationIntegralsApprox(radiationIntegralsParameters radiationIntParameters)
// {
// radiationIntegrals outputIntegralsApprox;
// // growth rates
// float alphax = 0.0;
// float alphay = 0.0;
// float gammax = (1.0 + pow(alphax,2)) / radiationIntParameters.betxRingAverage;
// float gammay = (1.0 + pow(alphay,2)) / radiationIntParameters.betyRingAverage;
// float Dx = radiationIntParameters.acceleratorLength / (2 * CUDA_PI_F * radiationIntParameters.gammaTransition);
// float Dy = 0.0;
// float Dxp = 0.1; // should find an approximation formula. However not very important
// float Dyp = 0.0;
// float Hx = (radiationIntParameters.betxRingAverage * Dxp + 2 * alphax * Dx * Dxp + gammax * Dx);
// float Hy = (radiationIntParameters.betyRingAverage * Dyp + 2 * alphay * Dy * Dyp + gammay * Dy);
// // define smooth approximation of radiation integrals
// outputIntegralsApprox.I2 = 2 * CUDA_PI_F / radiationIntParameters.DipoleBendingRadius;
// outputIntegralsApprox.I3 = 2 * CUDA_PI_F / pow(radiationIntParameters.DipoleBendingRadius,2);
// outputIntegralsApprox.I4x = 0.0;
// outputIntegralsApprox.I4y = 0.0;
// outputIntegralsApprox.I5x = Hx * 2 * CUDA_PI_F / pow(radiationIntParameters.DipoleBendingRadius,2);
// outputIntegralsApprox.I5y = Hy * 2 * CUDA_PI_F / pow(radiationIntParameters.DipoleBendingRadius,2);
// return outputIntegralsApprox;
// };
// struct CalculateRadiationIntegralsLatticeElement
// {
// __host__ __device__
// radiationIntegrals operator()(tfsTableData& tfsAcceleratorElement, radiationIntegralsParameters& radiationIntParameters) const
// {
// radiationIntegrals outputIntegralsLattice;
// float angle = tfsAcceleratorElement.angle;
// float l = tfsAcceleratorElement.l;
// float k1l = tfsAcceleratorElement.k1l;
// float dy = tfsAcceleratorElement.dy;
// float k1s = tfsAcceleratorElement.k1sl;
// float alphax = tfsAcceleratorElement.alfx;
// float alphay = tfsAcceleratorElement.alfy;
// float betx = tfsAcceleratorElement.betx;
// float bety = tfsAcceleratorElement.bety;
// float dx = tfsAcceleratorElement.dx;
// float dpx = tfsAcceleratorElement.dpx;
// float dpy = tfsAcceleratorElement.dpy;
// float rhoi = ( angle > 0.0) ? l /angle : 0.0;
// float ki = (l > 0.0) ? k1l / l : 0.0 ;
// outputIntegralsLattice.I2 = (rhoi > 0.0) ? l / pow(rhoi,2) : 0.0 ;
// outputIntegralsLattice.I3 = (rhoi > 0.0) ? l / pow(rhoi,3) : 0.0 ;
// // corrected to equations in accelerator handbook Chao second edition p 220
// outputIntegralsLattice.I4x = (rhoi > 0.0) ? ((dx / pow(rhoi,3)) + 2 * (ki * dx + (k1s / l) * dy) / rhoi) *l : 0.0 ;
// outputIntegralsLattice.I4y = 0.0;
// float gammax = (1.0 + pow(alphax,2)) / betx;
// float gammay = (1.0 + pow(alphay,2)) / bety;
// float Hx = betx * pow(dpx,2) + 2. * alphax * dx * dpx + gammax * pow(dx,2);
// float Hy = bety * pow(dpy,2) + 2. * alphay * dy * dpy + gammay * pow(dy,2);
// outputIntegralsLattice.I5x = Hx * 2 * CUDA_PI_F / pow(radiationIntParameters.DipoleBendingRadius,2) * l;
// outputIntegralsLattice.I5y = Hy * 2 * CUDA_PI_F / pow(radiationIntParameters.DipoleBendingRadius,2) * l;
// return outputIntegralsLattice;
// }
// };
// radiationIntegrals CalculateRadiationIntegralsLatticeRing(thrust::device_vector<tfsTableData> tfsData, radiationIntegralsParameters params)
// {
// int n = tfsData.size();
// thrust::device_vector<radiationIntegrals> radiationIntegralsPerElement(n);
// thrust::transform(tfsData.begin(),tfsData.end(),thrust::make_constant_iterator(params),radiationIntegralsPerElement.begin(),CalculateRadiationIntegralsLatticeElement());
// radiationIntegrals initsum;
// initsum.I2 = 0.0;
// initsum.I3 = 0.0;
// initsum.I4x = 0.0;
// initsum.I4y = 0.0;
// initsum.I5x = 0.0;
// initsum.I5y = 0.0;
// radiationIntegrals total = thrust::reduce(radiationIntegralsPerElement.begin(),radiationIntegralsPerElement.end(),initsum,addRadiationIntegralsElements());
// return total;
// }
// float6 CalculateRadiationDampingTimesAndEquilib(radiationIntegralsParameters params, radiationIntegrals integrals)
// {
// float6 result;
// // Chao handbook second edition page 221 eq. 11
// // float CalphaEC = params.ParticleRadius * CUDA_C_F / (3 * params.acceleratorLength);
// float CalphaEC = params.ParticleRadius * CUDA_C_F / (3 * pow(CUDA_ELECTRON_REST_E_F,3)) * (pow(params.p0,3)/params.acceleratorLength);
// // extra factor 2 to get growth rates for emittances and not amplitudes (sigmas)
// float alphax = 2.0f * CalphaEC * integrals.I2 * (1.0f - integrals.I4x / integrals.I2);
// float alphay = 2.0f * CalphaEC * integrals.I2 * (1.0f - integrals.I4y / integrals.I2);
// float alphas = 2.0f * CalphaEC * integrals.I2 * (2.0f + (integrals.I4x + integrals.I4y) / integrals.I2);
// // longitudinal equilibrium
// // Chao handbook second edition page 221 eq. 19
// float sigEoE02 = params.cq * pow(params.gammar,2) * integrals.I3 / (2 * integrals.I2 + integrals.I4x + integrals.I4y);
// float sigsEquilib = (CUDA_C_F * abs(params.eta) / params.omegas) * sqrt(sigEoE02);
// // Chao handbook second edition page 221 eq. 12
// float Jx = 1. - integrals.I4x / integrals.I2;
// float Jy = 1. - integrals.I4y / integrals.I2;
// // transverse equilibrium
// float EmitEquilibx = params.cq * pow(params.gammar,2) * integrals.I5x / (Jx * integrals.I2);
// float EmitEquiliby = params.cq * pow(params.gammar,2) * integrals.I5y / (Jy * integrals.I2);
// if (EmitEquiliby == 0.0)
// EmitEquiliby = params.cq * params.betyRingAverage * integrals.I3 / (2 * Jy * integrals.I2);
// result.x = 1.0 / alphax; // damping time returned in seconds
// result.px = 1.0 / alphay;
// result.y = 1.0 / alphas;
// result.py = EmitEquilibx;
// result.t = EmitEquiliby;
// result.delta = sigsEquilib/CUDA_C_F; // sigs returned in seconds
// return result;
// }
// need to calculate the particle radius separately!
struct RadiationDampingRoutineFunctor
{
RadiationDampingRoutineFunctor(float6 radiationDampParams, float trev, float timeratio, float seed) : radiationDampParams(radiationDampParams), trev(trev) ,timeratio(timeratio), seed(seed) {}
__host__ __device__ float6 operator()(float6 particle)
{
/*
* .x -> t_emitx
* .px -> t_emity
* .y -> t_sigs
* .py -> emit equilib x
* .t -> emit equilib y
* .delta -> sigs equilib
*/
float6 result;
unsigned int N = 1000;
thrust::default_random_engine rng;
rng.seed((int)seed);
rng.discard(N);
thrust::uniform_real_distribution<float> u01(0,1);
// timeratio is real machine turns over per simulation turn
float coeffdecaylong = 1 - (trev / radiationDampParams.y) * timeratio;
// excitation uses a uniform deviate on [-1:1]
float coeffexcitelong = radiationDampParams.delta * CUDA_C_F * sqrt(3.) * sqrt(1 - pow(coeffdecaylong,2));
// the damping time is for EMITTANCE, therefore need to multiply by 2
float coeffdecayx = 1 - ((trev /(2 * radiationDampParams.x)) * timeratio);
float coeffdecayy = 1 - ((trev /(2 * radiationDampParams.px)) * timeratio);
// exact coeffgrow= sigperp*sqrt(3.)*sqrt(1-coeffdecay**2)
// but trev << tradperp so
float coeffgrowx = radiationDampParams.py * sqrt(3.) * sqrt(1 - pow(coeffdecayx,2));
float coeffgrowy = radiationDampParams.t * sqrt(3.) * sqrt(1 - pow(coeffdecayy,2));
if ((radiationDampParams.x < 0.0) || (radiationDampParams.px) < 0.0)
return particle;
else
{
result.x = coeffdecayx * particle.x + coeffgrowx * (2*u01(rng)-1);
result.px = coeffdecayx * particle.px + coeffgrowx * (2*u01(rng)-1);
result.y = coeffdecayy * particle.y + coeffgrowy * (2*u01(rng)-1);
result.py = coeffdecayy * particle.py + coeffgrowy * (2*u01(rng)-1);
result.t = particle.t;
result.delta = coeffdecaylong * particle.delta + coeffexcitelong * (2*u01(rng)-1);
return result;
}
}
private:
float trev,seed;
float6 radiationDampParams;
float timeratio;
};
int main(int argc, char const *argv[])
{
double acceleratorLength = 240.00839; // length in meter
double gammar = 3326.817037; // relativistic gamma
double eta = 0.0007038773471 - 1 / pow(gammar ,2); // slip factor approx alpha - 1/ gammar**2
double betar = sqrt(1-1 / pow(gammar ,2)); // relativistic beta
double trev = acceleratorLength / (betar * CUDA_C_F);
double omega0 = (2 * CUDA_PI_F) / trev;
double p0 = 1.7e9;
radiationIntegrals radtest;
radiationIntegralsParameters intpars;
intpars.betxRingAverage = 10.0;
intpars.betyRingAverage = 20.0;
intpars.acceleratorLength = 240.00839;
intpars.gammaTransition = 1/ sqrt(0.0007038773471);
intpars.DipoleBendingRadius = 40.0;
intpars.ParticleRadius = CUDA_C_R_ELECTRON;
intpars.ParticleEnergy = 1.7e9;
// Chao handbook second edition page 221 eq. 20
intpars.cq = (55.0/(32.0 * sqrt(3))) * (CUDA_HBAR_F * CUDA_C_F) / (CUDA_ELECTRON_REST_E_F);
intpars.gammar = 3326.817037;
intpars.omegas = 0.0565621 * omega0;
intpars.eta = eta;
intpars.p0 = p0;
radtest = CalculateRadiationIntegralsApprox(intpars);
cout << radtest;
vector<vector<string> > out;
vector<vector<float> > fout;
string in = "/home/tmerten/mad/2017-12-21/twiss/Long-corrected-LongPMMM-2017-12-21.tfs";
out = ReadTfsTable(in,false);
map<string, int> maptest = mapOfColumns(out[0],false);
fout = TfsTableConvertStringToFloat(out,maptest);
thrust::device_vector<tfsTableData> testdata;
testdata = TfsTableToDevice(fout);
std::copy(testdata.begin(),testdata.begin()+10,std::ostream_iterator<tfsTableData>(std::cout, "\n"));
radiationIntegrals total = CalculateRadiationIntegralsLatticeRing(testdata, intpars);
cout << total ;
cout << "Radiation damping times and equilibrium" << endl;
cout << intpars.cq<< endl;
cout << intpars.ParticleRadius * CUDA_C_F / (3 * intpars.acceleratorLength) << endl;
cout << intpars.ParticleRadius * CUDA_C_F / (3 * pow(CUDA_ELECTRON_REST_E_F,3)) << endl;
cout << intpars.ParticleRadius * CUDA_C_F / (3 * pow(CUDA_ELECTRON_REST_E_F,3)) * (pow(p0,3)/intpars.acceleratorLength) << endl;
float6 timesequilib = CalculateRadiationDampingTimesAndEquilib(intpars, total);
cout << timesequilib<< endl;
intpars.omegas = 0.00784862 * omega0;
timesequilib = CalculateRadiationDampingTimesAndEquilib(intpars, total);
cout << timesequilib<< endl;
return 0;
}
// does radiation damping and quantum excitation once per turn
// implicit none
// integer, intent(inout) :: np
// !f2py intent(in,out) :: np
// double precision, intent(inout):: iseed
// !f2py intent(in,out) :: iseed
// integer ::k
// double precision, intent(in) :: tratio,trev,tradlong,tradperp,siglong,sigperp
// !f2py intent(in) :: tratio,trev,tradlong,siglong,sigperp
// double precision :: coeffdecaylong,coeffexcitelong,coeffgrow,coeffdecay
// double precision, intent(inout), dimension(np) ::x,px,y,py,pt
// !f2py intent(in,out) :: x,px,y,py,pt
// double precision,external :: ran
// integer, dimension(:), allocatable :: seed
// integer :: n
// ! init random generator
// call random_seed(size=n)
// allocate(seed(n))
// seed(1) = INT(iseed)
// call random_seed(put=seed)
// coeffdecaylong = 1 - ((trev / tradlong) * tratio)
// ! excitation uses a uniform deviate on [-1:1]
// coeffexcitelong = siglong * sqrt(3.) * sqrt(2 * (trev / tradlong) * tratio)
// ! tradperp is the damping time for EMITTANCE, therefore need to multiply by 2
// ! assume same damping in horizontal and vertical plane (I4x,I4y<<I2)
// coeffdecay = 1 - ((trev /(2 * tradperp)) * tratio)
// ! exact coeffgrow= sigperp*sqrt(3.)*sqrt(1-coeffdecay**2)
// ! but trev << tradperp so
// coeffgrow = sigperp * sqrt(3.) * sqrt(2 * (trev /(2 * tradperp)) * tratio)
// ! skip if transverse damping time is not positive
// if(tradperp.le.0)return
// do k=1,np
// ! longitudinal
// call random_number(iseed)
// pt(k) = pt(k)*coeffdecaylong +coeffexcitelong*(2*iseed-1)
// ! transverse
// x(k) = coeffdecay*x(k) + (2*iseed-1)*coeffgrow
// px(k) = coeffdecay*px(k)+(2*iseed-1)*coeffgrow
// y(k) = coeffdecay*y(k) + (2*iseed-1)*coeffgrow
// py(k) = coeffdecay*py(k)+(2*iseed-1)*coeffgrow
// enddo
// return
// end
|
465eedc3567d83b39fd8b66d603da8390a2c04b7.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////////////////////////////
// MNRT License
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2010 Mathias Neumann, www.maneumann.com.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
//
// 3. Neither the name Mathias Neumann, nor the names of contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \file GPU\raygen.cu
///
/// \brief Ray generation kernels.
/// \author Mathias Neumann
/// \date 13.02.2010
/// \ingroup globalillum
////////////////////////////////////////////////////////////////////////////////////////////////////
#include "KernelDefs.h"
#include "CameraModel.h"
#include "RayPool.h"
#include "MNCudaMT.h"
#include "MNCudaMemPool.h"
#include "mncudautil_dev.h"
#include "sample_dev.h"
/// Simple struct to store matrices in constant memory.
struct Matrix
{
/// The matrix elements.
float elems[4][4];
};
// Constant memory data.
/// Camera to world space transformation matrix constant memory variable.
__constant__ Matrix c_matCam2World;
/// Raster to camera space transformation matrix constant memory variable.
__constant__ Matrix c_matRaster2Cam;
/// Material properties for current scene. Constant memory variable.
__constant__ MaterialProperties c_Materials;
/// Triangle material index texture, one per triangle.
texture<uint, 1, hipReadModeElementType> tex_TriMatIdx;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \DEVICEFN
////////////////////////////////////////////////////////////////////////////////////////////////////
//@{
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __device__ float3 dev_transformPoint(float trans[4][4], float3 p)
///
/// \brief Transforms a point using given transform matrix.
///
/// \author Mathias Neumann
/// \date 05.04.2010
///
/// \param trans Elements of the 4x4 transformation matrix.
/// \param p The point. Will be converted to homogeneous representation, i.e.
/// \code [p.x, p.y, p.z, 1]^T \endcode
///
/// \return Transformed point.
////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ float3 dev_transformPoint(float trans[4][4], float3 p)
{
float3 res;
// The homogeneous representation for points is [x, y, z, 1]^T.
res.x = trans[0][0]*p.x + trans[0][1]*p.y + trans[0][2]*p.z + trans[0][3];
res.y = trans[1][0]*p.x + trans[1][1]*p.y + trans[1][2]*p.z + trans[1][3];
res.z = trans[2][0]*p.x + trans[2][1]*p.y + trans[2][2]*p.z + trans[2][3];
float w = trans[3][0]*p.x + trans[3][1]*p.y + trans[3][2]*p.z + trans[3][3];
if(w != 1.f)
res /= w;
return res;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __device__ float3 dev_transformVector(float trans[4][4], float3 v)
///
/// \brief Transforms a vector using given transform matrix.
///
/// \author Mathias Neumann
/// \date 05.04.2010
///
/// \param trans Elements of the 4x4 transformation matrix.
/// \param v The vector. Will be converted to homogeneous representation, i.e.
/// \code [v.x, v.y, v.z, 0]^T \endcode
///
/// \return Transformed vector.
////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ float3 dev_transformVector(float trans[4][4], float3 v)
{
float3 res;
// Note: The homogeneous coords for v are [x, y, z, 0]^T.
res.x = trans[0][0]*v.x + trans[0][1]*v.y + trans[0][2]*v.z;
res.y = trans[1][0]*v.x + trans[1][1]*v.y + trans[1][2]*v.z;
res.z = trans[2][0]*v.x + trans[2][1]*v.y + trans[2][2]*v.z;
return res;
}
//@}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \KERNELS
////////////////////////////////////////////////////////////////////////////////////////////////////
//@{
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __global__ void kernel_genPrimaryRays(uint nScreenW, uint nScreenH, float idxSampleX,
/// float invSamplesPerPixelX, float idxSampleY, float invSamplesPerPixelY, float clipHither,
/// float clipYon, float* d_randoms1, float* d_randoms2, RayChunk outChunk)
///
/// \brief Generates primary rays for ray tracing.
///
/// The rays are ordered using the Morton order (Z-curve), as proposed by Aila et al.
/// Also check http://en.wikipedia.org/wiki/Z-order_%28curve%29. All primary rays for
/// the given sample index are moved into a single ray chunk.
///
/// \author Mathias Neumann
/// \date March 2010
///
/// \param nScreenW Screen width in pixels.
/// \param nScreenH Screen height in pixels.
/// \param idxSampleX Sample index X (for stratified sampling).
/// \param invSamplesPerPixelX Inverse of the number of samples per pixel X.
/// \param idxSampleY Sample index Y (for stratified sampling).
/// \param invSamplesPerPixelY Inverse of the number of samples per pixel Y.
/// \param clipHither Near clipping plane distance.
/// \param clipYon Far clipping plane distance.
/// \param [in] d_randoms1 First uniform random numbers, one for each pixel. Used for
/// stratified sampling.
/// \param [in] d_randoms2 Second uniform random numbers, one for each pixel. Used for
/// stratified sampling.
/// \param outChunk The target ray chunk. Is assumed to be empty. Do not forget to
/// set ray count after kernel execution.
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_genPrimaryRays(uint nScreenW, uint nScreenH,
float idxSampleX, float invSamplesPerPixelX,
float idxSampleY, float invSamplesPerPixelY,
float clipHither, float clipYon,
float* d_randoms1, float* d_randoms2,
RayChunk outChunk)
{
uint idxPixel = blockIdx.x*blockDim.x + threadIdx.x;
if(idxPixel < nScreenW*nScreenH)
{
// Assign rays following the Morton order (Z-curve). This was proposed by Aila2009.
// See http://en.wikipedia.org/wiki/Z-order_%28curve%29
// Extract even bits for x and odd bits for y raster coordinate.
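// Example: idxPixel = 13 = 0b1101 decodes to x = 0b11 = 3 (even bits 0 and 2) and y = 0b10 = 2 (odd bits 1 and 3).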
uint x = 0, y = 0;
uint srcPos = 0; // Starting with lsb bit 0.
uint targetPos = 0;
uint mask = 1;
// Get raster coordinates for this thread.
while(mask <= idxPixel)
{
bool isOdd = srcPos & 1;
if(!isOdd && (mask & idxPixel)) // even bit set?
x |= 1 << targetPos;
if( isOdd && (mask & idxPixel)) // odd bit set?
y |= 1 << targetPos;
// Update mask.
mask <<= 1;
srcPos++;
// Increase target position in case we are done with the odd bit.
if(isOdd)
targetPos++;
}
float rnd1 = d_randoms1[idxPixel];
float rnd2 = d_randoms2[idxPixel];
// Stratify samples.
rnd1 = (idxSampleX + rnd1) * invSamplesPerPixelX;
rnd2 = (idxSampleY + rnd2) * invSamplesPerPixelY;
// Generate camera sample from raster sample.
float3 ptRaster;
if(invSamplesPerPixelX*invSamplesPerPixelY < 1.f)
ptRaster = make_float3(float(x) + rnd1, float(y) + rnd2, 0.f); // See PBR p. 309
else
ptRaster = make_float3(float(x) + 0.5f, float(y) + 0.5f, 0.f);
float3 originCam = dev_transformPoint(c_matRaster2Cam.elems, ptRaster);
float3 originWorld = dev_transformPoint(c_matCam2World.elems, originCam);
// originCam is also our direction in *camera* space, but normalized!
float3 dirCam = normalize(originCam);
float3 dirWorld = dev_transformVector(c_matCam2World.elems, dirCam);
dirWorld = normalize(dirWorld);
// The world origin is generated by transformation
outChunk.d_origins[idxPixel] = make_float4(originWorld);
outChunk.d_dirs[idxPixel] = make_float4(dirWorld);
// Initialize with filter value.
outChunk.d_influences[idxPixel] = make_float4(1.0f);
// Set pixel's vertex buffer object index.
outChunk.d_pixels[idxPixel] = y * nScreenW + x;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __global__ void kernel_genReflectedRays(RayChunk chunkSrc, ShadingPoints shadingPts,
/// RayChunk outChunk, uint* d_outIsValidRay)
///
/// \brief Generates secondary rays for specular reflection.
///
/// Calls ::dev_SampleDirectionSpecReflect() to generate reflected direction. Rays are
/// flagged as invalid when their influence RayChunk::d_influences[i] falls below \c 0.01f
/// for all components.
///
/// \author Mathias Neumann
/// \date March 2010
///
/// \param chunkSrc Source ray chunk.
/// \param shadingPts Shading points (hit points) of source rays.
/// \param outChunk Target ray chunk. Is assumed to be empty. Do not forget to
/// set ray count after kernel execution.
/// \param [out] d_outIsValidRay Binary 0/1 array. Will contain 1 for valid rays, 0 for invalid
/// rays. The latter can be removed by compaction.
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_genReflectedRays(RayChunk chunkSrc, ShadingPoints shadingPts,
RayChunk outChunk, uint* d_outIsValidRay)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
// We compacted the shading points, so no invalid triangle indices.
if(idx < shadingPts.numPoints)
{
// Intersection point is ray source.
outChunk.d_origins[idx] = shadingPts.d_ptInter[idx];
// Get ray direction.
float3 vSrcRayDir = make_float3(chunkSrc.d_dirs[idx]);
int idxTri = shadingPts.d_idxTris[idx];
// Fetch shading normal.
float4 n4 = shadingPts.d_normalsS[idx];
float3 nS = make_float3(n4.x, n4.y, n4.z);
uint idxMaterial = tex1Dfetch(tex_TriMatIdx, idxTri);
float3 specColor = c_Materials.clrSpec[idxMaterial];
float3 vReflected;
float pdf; // Will be one, so no need to divide by.
float3 f = dev_SampleDirectionSpecReflect(-vSrcRayDir, nS,
0.f, 0.f, specColor, &vReflected, &pdf);
outChunk.d_dirs[idx] = make_float4(vReflected);
float3 infl = f * fabsf(dot(vReflected, nS)) * make_float3(chunkSrc.d_influences[idx]);
outChunk.d_influences[idx] = make_float4(infl);
outChunk.d_pixels[idx] = chunkSrc.d_pixels[idx];
// Mark low influence rays as invalid to avoid tracing them.
uint isValid = ((infl.x >= 0.01f || infl.y >= 0.01f || infl.z >= 0.01f) ? 1 : 0);
d_outIsValidRay[idx] = isValid;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __global__ void kernel_genTransmittedRays(RayChunk chunkSrc, ShadingPoints shadingPts,
/// RayChunk outChunk, uint* d_outIsValidRay)
///
/// \brief Generates secondary rays for specular transmission.
///
/// Calls ::dev_SampleDirectionSpecTransmit() to generate transmitted direction. Rays are
/// flagged as invalid when their influence RayChunk::d_influences[i] falls below \c 0.01f
/// for all components.
///
/// \author Mathias Neumann
/// \date March 2010
///
/// \param chunkSrc Source ray chunk.
/// \param shadingPts Shading points (hit points) of source rays.
/// \param outChunk Target ray chunk. Is assumed to be empty. Do not forget to
/// set ray count after kernel execution.
/// \param [out] d_outIsValidRay Binary 0/1 array. Will contain 1 for valid rays, 0 for invalid
/// rays. The latter can be removed by compaction.
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_genTransmittedRays(RayChunk chunkSrc, ShadingPoints shadingPts,
RayChunk outChunk, uint* d_outIsValidRay)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < shadingPts.numPoints)
{
// Triangle index is valid since we compacted the shading point array.
int idxTri = shadingPts.d_idxTris[idx];
uint idxMaterial = tex1Dfetch(tex_TriMatIdx, idxTri);
// Intersection point is ray source.
outChunk.d_origins[idx] = shadingPts.d_ptInter[idx];
// Fetch shading normal.
float4 n4 = shadingPts.d_normalsS[idx];
float3 nS = make_float3(n4.x, n4.y, n4.z);
// Get source ray direction.
float3 vSrcRayDir = make_float3(chunkSrc.d_dirs[idx]);
// Get indices of refraction in correct order.
float n_from = 1.f;
float n_to = c_Materials.indexRefrac[idxMaterial];
if(dot(nS, -vSrcRayDir) < 0.f)
{
// Swap...
float temp = n_from;
n_from = n_to;
n_to = temp;
// Now ensure normal and -vSrcRayDir lie in the same hemisphere.
nS *= -1.f;
}
// Sample refracted direction from BTDF, see PBR, p. 433.
float transAlpha = c_Materials.transAlpha[idxMaterial];
float3 clrTransmit = c_Materials.clrDiff[idxMaterial] * (1.f - transAlpha);
float3 vRefract;
float pdf; // Will be one, so no need to divide by.
float3 f = dev_SampleDirectionSpecTransmit(-vSrcRayDir, nS,
0.f, 0.f, clrTransmit, n_from/n_to, false, &vRefract, &pdf);
outChunk.d_dirs[idx] = make_float4(vRefract);
float3 infl = f * fabsf(dot(vRefract, nS)) * make_float3(chunkSrc.d_influences[idx]);
outChunk.d_influences[idx] = make_float4(infl);
outChunk.d_pixels[idx] = chunkSrc.d_pixels[idx];
// Mark low influence rays as invalid to avoid tracing them.
uint isValid = ((infl.x >= 0.01f || infl.y >= 0.01f || infl.z >= 0.01f) ? 1 : 0);
d_outIsValidRay[idx] = isValid;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __global__ void kernel_genFinalGatherRays(ShadingPoints shadingPts, float4* d_clrDiffHit,
/// float* d_randoms1, float* d_randoms2, float idxFGRayX, float invNumFGRaysX,
/// float idxFGRayY, float invNumFGRaysY, RayChunk outChunk)
///
/// \brief Generates gather rays for final gathering.
///
/// Currently only diffuse BRDFs are supported for final gathering. For them, the
/// ::dev_SampleDirectionLambertian() function is used to sample directions for gather rays.
///
/// \author Mathias Neumann
/// \date 12.04.2010
///
/// \param shadingPts Source shading points for final gather rays.
/// \param [in] d_clrDiffHit Diffuse material color at each shading point. Used for
/// diffuse BRDF evaluation. \c xyz contains color and \c w
/// transparency alpha.
/// \param [in] d_randoms1 First uniform random number array, one per shading point.
/// Used for direction sampling.
/// \param [in] d_randoms2 Second uniform random number array, one per shading point.
/// Used for direction sampling.
/// \param idxFGRayX Final gather ray index X (for stratified sampling).
/// \param invNumFGRaysX Inverse of the number of final gather rays X.
/// \param idxFGRayY Final gather ray index Y (for stratified sampling).
/// \param invNumFGRaysY Inverse of the number of final gather rays Y.
/// \param outChunk Target ray chunk for gather rays. Is assumed to be empty. Do
/// not forget to set ray count after kernel execution.
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_genFinalGatherRays(ShadingPoints shadingPts, float4* d_clrDiffHit,
float* d_randoms1, float* d_randoms2,
float idxFGRayX, float invNumFGRaysX,
float idxFGRayY, float invNumFGRaysY,
RayChunk outChunk)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
// We compacted the intersection result, so no invalid triangle indices.
if(tid < shadingPts.numPoints)
{
// It's important to use the geometric normal here. Else gathering would not work correctly
// as ray casting depends on the geometry! See PBR p. 761.
float3 n_g = make_float3(shadingPts.d_normalsG[tid]);
// Get diffuse color of hit triangle.
float4 clrDiffHit4 = d_clrDiffHit[tid];
float transAlpha = clrDiffHit4.w;
float3 clrDiffHit = make_float3(clrDiffHit4) * transAlpha;
// Sample a random direction in the same hemisphere for diffuse reflection.
// NOTE: I tried multiple RNGs for this. The QRNG using radical inverses didn't work and
// lead to high noise and errors in the picture. A simple LCG RNG wasn't better.
// Therefore I now use pregenerated random numbers from the Mersenne Twister of
// the CUDA SDK 3.0.
float rnd1 = d_randoms1[tid];
float rnd2 = d_randoms2[tid];
// Stratify samples. In both directions to get best cache performance. I use a very basic
// stratification here, but noise reduction results are OK.
rnd1 = (idxFGRayX + rnd1) * invNumFGRaysX;
rnd2 = (idxFGRayY + rnd2) * invNumFGRaysY;
// Assume that the source ray arrived from the upper hemisphere with respect to the
// geometric normal. As we do not perform final gathering at specular surfaces, this
// assumption is valid as long as the camera is not within some object. Basically we
// can avoid keeping track of the incoming directions for the given shading points
// when using this assumption.
float pdf = 0.f;
float3 w_i = make_float3(1.f, 0.f, 0.f);
float3 w_o = n_g; // See above.
float3 f_r = dev_SampleDirectionLambertian(w_o, n_g, rnd1, rnd2, clrDiffHit, &w_i, &pdf);
// Do not perform final gathering for specular surfaces. There is just NO WAY to generate
// final gather rays physically.
bool hasNonSpecular = clrDiffHit.x != 0.f || clrDiffHit.y != 0.f || clrDiffHit.z != 0.f;
// Alpha, that is the ray influence, should contain the PI / numSamples value for final gathering
// for irradiance, see PBR p. 762.
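// With cosine-weighted hemisphere sampling (pdf = cos(theta)/pi) the Monte Carlo irradiance
// estimate reduces to (pi / numSamples) * sum of incoming radiance, which is exactly the scale
// applied below (assuming dev_SampleDirectionLambertian draws cosine-weighted directions).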
float fgScale = 0.f;
if(pdf != 0.f && hasNonSpecular)
fgScale = MN_PI * (invNumFGRaysX * invNumFGRaysY);
// Avoid evaluation in case w_i and w_o lie in different hemispheres
// with respect to n_g. PBR p. 465 or VeachPhD, p. 153.
//if(dot(w_o, n_g) * dot(w_i, n_g) <= 0.f)
// fgScale = 0.f;
// Store the new ray.
outChunk.d_origins[tid] = shadingPts.d_ptInter[tid];
outChunk.d_dirs[tid] = make_float4(w_i);
outChunk.d_influences[tid] = make_float4(fgScale);
outChunk.d_pixels[tid] = shadingPts.d_pixels[tid];
}
}
//@}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn extern "C" void RTUpdateRayGenKernels(const TriangleData& tris, const MaterialData& mats)
///
/// \brief Binds textures and sets constant memory variables.
///
/// \author Mathias Neumann
/// \date 13.02.2010
///
/// \param tris Triangle data for current scene.
/// \param mats Material data for current scene.
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
void RTUpdateRayGenKernels(const TriangleData& tris, const MaterialData& mats)
{
mncudaSafeCallNoSync(hipMemcpyToSymbol("c_Materials", &mats.matProps, sizeof(MaterialProperties)));
hipChannelFormatDesc cdUint = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindUnsigned);
tex_TriMatIdx.normalized = false;
mncudaSafeCallNoSync(hipBindTexture(NULL, tex_TriMatIdx, tris.d_idxMaterial, cdUint, tris.numTris*sizeof(uint)));
}
/// Unbinds textures used for ray generation kernels.
extern "C"
void RTCleanupRayGenKernels()
{
mncudaSafeCallNoSync(hipUnbindTexture(tex_TriMatIdx));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \WRAPPERS
////////////////////////////////////////////////////////////////////////////////////////////////////
//@{
/// Wraps kernel_genPrimaryRays() kernel call.
extern "C"
void KernelRTPrimaryKernel(CameraModel* pCamera,
uint idxSampleX, uint samplesPerPixelX, uint idxSampleY, uint samplesPerPixelY,
RayChunk& outChunk)
{
uint screenW = pCamera->GetScreenWidth();
uint screenH = pCamera->GetScreenHeight();
MNTransform cam2world = pCamera->GetCamera2World();
MNTransform raster2cam = pCamera->GetRaster2Camera();
// Move matrices to constant memory.
// WARNING: float[4][4] arrays cannot be passed as kernel parameters, hence the Matrix wrapper struct!
Matrix matCam2World;
for(uint i=0; i<4; i++)
for(uint j=0; j<4; j++)
matCam2World.elems[i][j] = cam2world.GetMatrix(i, j);
mncudaSafeCallNoSync(hipMemcpyToSymbol("c_matCam2World", &matCam2World, sizeof(Matrix)));
Matrix matRaster2Cam;
for(uint i=0; i<4; i++)
for(uint j=0; j<4; j++)
matRaster2Cam.elems[i][j] = raster2cam.GetMatrix(i, j);
mncudaSafeCallNoSync(hipMemcpyToSymbol("c_matRaster2Cam", &matRaster2Cam, sizeof(Matrix)));
uint numPixels = screenW*screenH;
dim3 blockSize = dim3(256, 1, 1);
dim3 gridSize = dim3(MNCUDA_DIVUP(numPixels, blockSize.x), 1);
MNCudaMT& mtw = MNCudaMT::GetInstance();
uint numRnd = mtw.GetAlignedCount(numPixels);
MNCudaMemory<float> d_randoms(2*numRnd);
mtw.Seed(rand());
mncudaSafeCallNoSync(mtw.Generate(d_randoms, 2*numRnd));
// Generate primary rays.
float invSamplesPerPixelX = ((samplesPerPixelX > 1) ? 1.f / float(samplesPerPixelX) : 1.f);
float invSamplesPerPixelY = ((samplesPerPixelY > 1) ? 1.f / float(samplesPerPixelY) : 1.f);
hipLaunchKernelGGL(( kernel_genPrimaryRays), dim3(gridSize), dim3(blockSize), 0, 0,
screenW, screenH, (float)idxSampleX, invSamplesPerPixelX, (float)idxSampleY, invSamplesPerPixelY,
pCamera->GetClipHither(), pCamera->GetClipYon(),
d_randoms, d_randoms+numRnd, outChunk);
MNCUDA_CHECKERROR;
// Update chunk status.
outChunk.rekDepth = 0;
outChunk.numRays = numPixels;
}
/// Wraps kernel_genReflectedRays() kernel call.
extern "C"
void KernelRTReflectedKernel(RayChunk& chunkSrc, ShadingPoints& shadingPts,
TriangleData& triData, RayChunk& outChunk, uint* d_outIsValid)
{
dim3 blockSize = dim3(256, 1, 1);
dim3 gridSize = dim3(MNCUDA_DIVUP(chunkSrc.numRays, blockSize.x), 1, 1);
// Generate rays into out chunk.
hipLaunchKernelGGL(( kernel_genReflectedRays), dim3(gridSize), dim3(blockSize), 0, 0,
chunkSrc, shadingPts, outChunk, d_outIsValid);
MNCUDA_CHECKERROR;
// Increase recursion depth.
outChunk.rekDepth = chunkSrc.rekDepth + 1;
outChunk.numRays = chunkSrc.numRays;
}
/// Wraps kernel_genTransmittedRays() kernel call.
extern "C"
void KernelRTTransmittedKernel(RayChunk& chunkSrc, ShadingPoints& shadingPts,
TriangleData& triData, RayChunk& outChunk, uint* d_outIsValid)
{
dim3 blockSize = dim3(256, 1, 1);
dim3 gridSize = dim3(MNCUDA_DIVUP(chunkSrc.numRays, blockSize.x), 1, 1);
// Generate rays into out chunk.
hipLaunchKernelGGL(( kernel_genTransmittedRays), dim3(gridSize), dim3(blockSize), 0, 0,
chunkSrc, shadingPts, outChunk, d_outIsValid);
MNCUDA_CHECKERROR;
// Increase recursion depth.
outChunk.rekDepth = chunkSrc.rekDepth + 1;
outChunk.numRays = chunkSrc.numRays;
}
/// Wraps kernel_genFinalGatherRays() kernel call.
extern "C"
void KernelRTFinalGatherRays(const ShadingPoints& shadingPts, float4* d_clrDiffHit,
float* d_randoms1, float* d_randoms2,
uint idxFGRayX, uint numFGRaysX,
uint idxFGRayY, uint numFGRaysY,
RayChunk& outChunk)
{
dim3 blockSize = dim3(256, 1, 1);
dim3 gridSize = dim3(MNCUDA_DIVUP(shadingPts.numPoints, blockSize.x), 1, 1);
float invNumRaysX = ((numFGRaysX > 1) ? 1.f / float(numFGRaysX) : 1.f);
float invNumRaysY = ((numFGRaysY > 1) ? 1.f / float(numFGRaysY) : 1.f);
hipLaunchKernelGGL(( kernel_genFinalGatherRays), dim3(gridSize), dim3(blockSize), 0, 0, shadingPts, d_clrDiffHit, d_randoms1, d_randoms2,
(float)idxFGRayX, invNumRaysX, (float)idxFGRayY, invNumRaysY, outChunk);
MNCUDA_CHECKERROR;
outChunk.rekDepth = 0;
outChunk.numRays = shadingPts.numPoints;
}
//@}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////// | 465eedc3567d83b39fd8b66d603da8390a2c04b7.cu | ////////////////////////////////////////////////////////////////////////////////////////////////////
// MNRT License
////////////////////////////////////////////////////////////////////////////////////////////////////
//
// Copyright (c) 2010 Mathias Neumann, www.maneumann.com.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are
// permitted provided that the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
//
// 2. Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation and/or
// other materials provided with the distribution.
//
// 3. Neither the name Mathias Neumann, nor the names of contributors may be
// used to endorse or promote products derived from this software without
// specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
// ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
// MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
// COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
// EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE
// GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
// ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
// OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \file GPU\raygen.cu
///
/// \brief Ray generation kernels.
/// \author Mathias Neumann
/// \date 13.02.2010
/// \ingroup globalillum
////////////////////////////////////////////////////////////////////////////////////////////////////
#include "KernelDefs.h"
#include "CameraModel.h"
#include "RayPool.h"
#include "MNCudaMT.h"
#include "MNCudaMemPool.h"
#include "mncudautil_dev.h"
#include "sample_dev.h"
/// Simple struct to store matrices in constant memory.
struct Matrix
{
/// The matrix elements.
float elems[4][4];
};
// Constant memory data.
/// Camera to world space transformation matrix constant memory variable.
__constant__ Matrix c_matCam2World;
/// Raster to camera space transformation matrix constant memory variable.
__constant__ Matrix c_matRaster2Cam;
/// Material properties for current scene. Constant memory variable.
__constant__ MaterialProperties c_Materials;
/// Triangle material index texture, one per triangle.
texture<uint, 1, cudaReadModeElementType> tex_TriMatIdx;
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \DEVICEFN
////////////////////////////////////////////////////////////////////////////////////////////////////
//@{
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __device__ float3 dev_transformPoint(float trans[4][4], float3 p)
///
/// \brief Transforms a point using given transform matrix.
///
/// \author Mathias Neumann
/// \date 05.04.2010
///
/// \param trans Elements of the 4x4 transformation matrix.
/// \param p The point. Will be converted to homogeneous representation, i.e.
/// \code [p.x, p.y, p.z, 1]^T \endcode
///
/// \return Transformed point.
////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ float3 dev_transformPoint(float trans[4][4], float3 p)
{
float3 res;
// The homogeneous representation for points is [x, y, z, 1]^T.
res.x = trans[0][0]*p.x + trans[0][1]*p.y + trans[0][2]*p.z + trans[0][3];
res.y = trans[1][0]*p.x + trans[1][1]*p.y + trans[1][2]*p.z + trans[1][3];
res.z = trans[2][0]*p.x + trans[2][1]*p.y + trans[2][2]*p.z + trans[2][3];
float w = trans[3][0]*p.x + trans[3][1]*p.y + trans[3][2]*p.z + trans[3][3];
if(w != 1.f)
res /= w;
return res;
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __device__ float3 dev_transformVector(float trans[4][4], float3 v)
///
/// \brief Transforms a vector using given transform matrix.
///
/// \author Mathias Neumann
/// \date 05.04.2010
///
/// \param trans Elements of the 4x4 transformation matrix.
/// \param v The vector. Will be converted to homogeneous representation, i.e.
/// \code [v.x, v.y, v.z, 0]^T \endcode
///
/// \return Transformed vector.
////////////////////////////////////////////////////////////////////////////////////////////////////
__device__ float3 dev_transformVector(float trans[4][4], float3 v)
{
float3 res;
// Note: The homogeneous coords for v are [x, y, z, 0]^T.
res.x = trans[0][0]*v.x + trans[0][1]*v.y + trans[0][2]*v.z;
res.y = trans[1][0]*v.x + trans[1][1]*v.y + trans[1][2]*v.z;
res.z = trans[2][0]*v.x + trans[2][1]*v.y + trans[2][2]*v.z;
return res;
}
//@}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \KERNELS
////////////////////////////////////////////////////////////////////////////////////////////////////
//@{
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __global__ void kernel_genPrimaryRays(uint nScreenW, uint nScreenH, float idxSampleX,
/// float invSamplesPerPixelX, float idxSampleY, float invSamplesPerPixelY, float clipHither,
/// float clipYon, float* d_randoms1, float* d_randoms2, RayChunk outChunk)
///
/// \brief Generates primary ray for ray tracing.
///
/// The rays are ordered using the Morton order (Z-curve). This was proposed by Aila et al.
/// Also check http://en.wikipedia.org/wiki/Z-order_%28curve%29. All primary rays for
/// the given sample index are moved into a single ray chunk.
///
/// \author Mathias Neumann
/// \date March 2010
///
/// \param nScreenW Screen width in pixels.
/// \param nScreenH Screen height in pixels.
/// \param idxSampleX Sample index X (for stratified sampling).
/// \param invSamplesPerPixelX Inverse of the number of samples per pixel X.
/// \param idxSampleY Sample index Y (for stratified sampling).
/// \param invSamplesPerPixelY Inverse of the number of samples per pixel Y.
/// \param clipHither Near clipping plane distance.
/// \param clipYon Far clipping plane distance.
/// \param [in] d_randoms1 First uniform random numbers, one for each pixel. Used for
/// stratified sampling.
/// \param [in] d_randoms2 Second uniform random numbers, one for each pixel. Used for
/// stratified sampling.
/// \param outChunk The target ray chunk. Is assumed to be empty. Do not forget to
/// set ray count after kernel execution.
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_genPrimaryRays(uint nScreenW, uint nScreenH,
float idxSampleX, float invSamplesPerPixelX,
float idxSampleY, float invSamplesPerPixelY,
float clipHither, float clipYon,
float* d_randoms1, float* d_randoms2,
RayChunk outChunk)
{
uint idxPixel = blockIdx.x*blockDim.x + threadIdx.x;
if(idxPixel < nScreenW*nScreenH)
{
// Assign rays following the Morton order (Z-curve). This was proposed by Aila2009.
// See http://en.wikipedia.org/wiki/Z-order_%28curve%29
// Extract even bits for x and odd bits for y raster coordinate.
uint x = 0, y = 0;
uint srcPos = 0; // Starting with lsb bit 0.
uint targetPos = 0;
uint mask = 1;
// Get raster coordinates for this thread.
while(mask <= idxPixel)
{
bool isOdd = srcPos & 1;
if(!isOdd && (mask & idxPixel)) // even bit set?
x |= 1 << targetPos;
if( isOdd && (mask & idxPixel)) // odd bit set?
y |= 1 << targetPos;
// Update mask.
mask <<= 1;
srcPos++;
// Increase target position in case we are done with the odd bit.
if(isOdd)
targetPos++;
}
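// Worked example: idxPixel = 13 = binary 1101. Its even bits (positions 0 and 2, both 1)
// de-interleave to x = binary 11 = 3, and its odd bits (positions 1 and 3: 0 and 1)
// de-interleave to y = binary 10 = 2, i.e. this thread handles pixel (3, 2) on the Z-curve.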
float rnd1 = d_randoms1[idxPixel];
float rnd2 = d_randoms2[idxPixel];
// Stratify samples.
rnd1 = (idxSampleX + rnd1) * invSamplesPerPixelX;
rnd2 = (idxSampleY + rnd2) * invSamplesPerPixelY;
// Generate camera sample from raster sample.
float3 ptRaster;
if(invSamplesPerPixelX*invSamplesPerPixelY < 1.f)
ptRaster = make_float3(float(x) + rnd1, float(y) + rnd2, 0.f); // See PBR p. 309
else
ptRaster = make_float3(float(x) + 0.5f, float(y) + 0.5f, 0.f);
float3 originCam = dev_transformPoint(c_matRaster2Cam.elems, ptRaster);
float3 originWorld = dev_transformPoint(c_matCam2World.elems, originCam);
// originCam is also our direction in *camera* space, but normalized!
float3 dirCam = normalize(originCam);
float3 dirWorld = dev_transformVector(c_matCam2World.elems, dirCam);
dirWorld = normalize(dirWorld);
// The world origin is generated by transformation
outChunk.d_origins[idxPixel] = make_float4(originWorld);
outChunk.d_dirs[idxPixel] = make_float4(dirWorld);
// Initialize with filter value.
outChunk.d_influences[idxPixel] = make_float4(1.0f);
// Set pixel's vertex buffer object index.
outChunk.d_pixels[idxPixel] = y * nScreenW + x;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __global__ void kernel_genReflectedRays(RayChunk chunkSrc, ShadingPoints shadingPts,
/// RayChunk outChunk, uint* d_outIsValidRay)
///
/// \brief Generates secondary rays for specular reflection.
///
/// Calls ::dev_SampleDirectionSpecReflect() to generate reflected direction. Rays are
/// flagged as invalid when their influence RayChunk::d_influences[i] falls below \c 0.01f
/// for all components.
///
/// \author Mathias Neumann
/// \date March 2010
///
/// \param chunkSrc Source ray chunk.
/// \param shadingPts Shading points (hit points) of source rays.
/// \param outChunk Target ray chunk. Is assumed to be empty. Do not forget to
/// set ray count after kernel execution.
/// \param [out] d_outIsValidRay Binary 0/1 array. Will contain 1 for valid rays, 0 for invalid
/// rays. The latter can be removed by compaction.
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_genReflectedRays(RayChunk chunkSrc, ShadingPoints shadingPts,
RayChunk outChunk, uint* d_outIsValidRay)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
// We compacted the shading points, so no invalid triangle indices.
if(idx < shadingPts.numPoints)
{
// Intersection point is ray source.
outChunk.d_origins[idx] = shadingPts.d_ptInter[idx];
// Get ray direction.
float3 vSrcRayDir = make_float3(chunkSrc.d_dirs[idx]);
int idxTri = shadingPts.d_idxTris[idx];
// Fetch shading normal.
float4 n4 = shadingPts.d_normalsS[idx];
float3 nS = make_float3(n4.x, n4.y, n4.z);
uint idxMaterial = tex1Dfetch(tex_TriMatIdx, idxTri);
float3 specColor = c_Materials.clrSpec[idxMaterial];
float3 vReflected;
float pdf; // Will be one, so no need to divide by.
float3 f = dev_SampleDirectionSpecReflect(-vSrcRayDir, nS,
0.f, 0.f, specColor, &vReflected, &pdf);
outChunk.d_dirs[idx] = make_float4(vReflected);
float3 infl = f * fabsf(dot(vReflected, nS)) * make_float3(chunkSrc.d_influences[idx]);
outChunk.d_influences[idx] = make_float4(infl);
outChunk.d_pixels[idx] = chunkSrc.d_pixels[idx];
// Mark low influence rays as invalid to avoid tracing them.
uint isValid = ((infl.x >= 0.01f || infl.y >= 0.01f || infl.z >= 0.01f) ? 1 : 0);
d_outIsValidRay[idx] = isValid;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __global__ void kernel_genTransmittedRays(RayChunk chunkSrc, ShadingPoints shadingPts,
/// RayChunk outChunk, uint* d_outIsValidRay)
///
/// \brief Generates secondary rays for specular transmission.
///
/// Calls ::dev_SampleDirectionSpecTransmit() to generate transmitted direction. Rays are
/// flagged as invalid when their influence RayChunk::d_influences[i] falls below \c 0.01f
/// for all components.
///
/// \author Mathias Neumann
/// \date March 2010
///
/// \param chunkSrc Source ray chunk.
/// \param shadingPts Shading points (hit points) of source rays.
/// \param outChunk Target ray chunk. Is assumed to be empty. Do not forget to
/// set ray count after kernel execution.
/// \param [out] d_outIsValidRay Binary 0/1 array. Will contain 1 for valid rays, 0 for invalid
/// rays. The latter can be removed by compaction.
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_genTransmittedRays(RayChunk chunkSrc, ShadingPoints shadingPts,
RayChunk outChunk, uint* d_outIsValidRay)
{
uint idx = blockIdx.x * blockDim.x + threadIdx.x;
if(idx < shadingPts.numPoints)
{
// Triangle index is valid since we compacted the shading point array.
int idxTri = shadingPts.d_idxTris[idx];
uint idxMaterial = tex1Dfetch(tex_TriMatIdx, idxTri);
// Intersection point is ray source.
outChunk.d_origins[idx] = shadingPts.d_ptInter[idx];
// Fetch shading normal.
float4 n4 = shadingPts.d_normalsS[idx];
float3 nS = make_float3(n4.x, n4.y, n4.z);
// Get source ray direction.
float3 vSrcRayDir = make_float3(chunkSrc.d_dirs[idx]);
// Get indices of refraction in correct order.
float n_from = 1.f;
float n_to = c_Materials.indexRefrac[idxMaterial];
if(dot(nS, -vSrcRayDir) < 0.f)
{
// Swap...
float temp = n_from;
n_from = n_to;
n_to = temp;
// Now ensure normal and -vSrcRayDir lie in the same hemisphere.
nS *= -1.f;
}
// Sample refracted direction from BTDF, see PBR, p. 433.
float transAlpha = c_Materials.transAlpha[idxMaterial];
float3 clrTransmit = c_Materials.clrDiff[idxMaterial] * (1.f - transAlpha);
float3 vRefract;
float pdf; // Will be one, so no need to divide by.
float3 f = dev_SampleDirectionSpecTransmit(-vSrcRayDir, nS,
0.f, 0.f, clrTransmit, n_from/n_to, false, &vRefract, &pdf);
outChunk.d_dirs[idx] = make_float4(vRefract);
float3 infl = f * fabsf(dot(vRefract, nS)) * make_float3(chunkSrc.d_influences[idx]);
outChunk.d_influences[idx] = make_float4(infl);
outChunk.d_pixels[idx] = chunkSrc.d_pixels[idx];
// Mark low influence rays as invalid to avoid tracing them.
uint isValid = ((infl.x >= 0.01f || infl.y >= 0.01f || infl.z >= 0.01f) ? 1 : 0);
d_outIsValidRay[idx] = isValid;
}
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn __global__ void kernel_genFinalGatherRays(ShadingPoints shadingPts, float4* d_clrDiffHit,
/// float* d_randoms1, float* d_randoms2, float idxFGRayX, float invNumFGRaysX,
/// float idxFGRayY, float invNumFGRaysY, RayChunk outChunk)
///
/// \brief Generates gather rays for final gathering.
///
/// Currently only diffuse BRDFs are supported for final gathering. For them, the
/// ::dev_SampleDirectionLambertian() function is used to sample directions for gather rays.
///
/// \author Mathias Neumann
/// \date 12.04.2010
///
/// \param shadingPts Source shading points for final gather rays.
/// \param [in] d_clrDiffHit Diffuse material color at each shading point. Used for
/// diffuse BRDF evaluation. \c xyz contains color and \c w
/// transparency alpha.
/// \param [in] d_randoms1 First uniform random number array, one per shading point.
/// Used for direction sampling.
/// \param [in] d_randoms2 Second uniform random number array, one per shading point.
/// Used for direction sampling.
/// \param idxFGRayX Final gather ray index X (for stratified sampling).
/// \param invNumFGRaysX Inverse of the number of final gather rays X.
/// \param idxFGRayY Final gather ray index Y (for stratified sampling).
/// \param invNumFGRaysY Inverse of the number of final gather rays Y.
/// \param outChunk Target ray chunk for gather rays. Is assumed to be empty. Do
/// not forget to set ray count after kernel execution.
////////////////////////////////////////////////////////////////////////////////////////////////////
__global__ void kernel_genFinalGatherRays(ShadingPoints shadingPts, float4* d_clrDiffHit,
float* d_randoms1, float* d_randoms2,
float idxFGRayX, float invNumFGRaysX,
float idxFGRayY, float invNumFGRaysY,
RayChunk outChunk)
{
uint tid = blockIdx.x * blockDim.x + threadIdx.x;
// We compacted the intersection result, so no invalid triangle indices.
if(tid < shadingPts.numPoints)
{
// It's important to use the geometric normal here. Else gathering would not work correctly
// as ray casting depends on the geometry! See PBR p. 761.
float3 n_g = make_float3(shadingPts.d_normalsG[tid]);
// Get diffuse color of hit triangle.
float4 clrDiffHit4 = d_clrDiffHit[tid];
float transAlpha = clrDiffHit4.w;
float3 clrDiffHit = make_float3(clrDiffHit4) * transAlpha;
// Sample a random direction in the same hemisphere for diffuse reflection.
// NOTE: I tried multiple RNGs for this. The QRNG using radical inverses didn't work and
// led to high noise and errors in the picture. A simple LCG RNG wasn't better.
// Therefore I now use pregenerated random numbers from the Mersenne Twister of
// the CUDA SDK 3.0.
float rnd1 = d_randoms1[tid];
float rnd2 = d_randoms2[tid];
// Stratify samples. In both directions to get best cache performance. I use a very basic
// stratification here, but noise reduction results are OK.
rnd1 = (idxFGRayX + rnd1) * invNumFGRaysX;
rnd2 = (idxFGRayY + rnd2) * invNumFGRaysY;
// Assume that the source ray arrived from the upper hemisphere with respect to the
// geometric normal. As we do not perform final gathering at specular surfaces, this
// assumption is valid as long as the camera is not within some object. Basically we
// can avoid keeping track of the incoming directions for the given shading points
// when using this assumption.
float pdf = 0.f;
float3 w_i = make_float3(1.f, 0.f, 0.f);
float3 w_o = n_g; // See above.
float3 f_r = dev_SampleDirectionLambertian(w_o, n_g, rnd1, rnd2, clrDiffHit, &w_i, &pdf);
// Do not perform final gathering for specular surfaces. There is just NO WAY to generate
// final gather rays physically.
bool hasNonSpecular = clrDiffHit.x != 0.f || clrDiffHit.y != 0.f || clrDiffHit.z != 0.f;
// Alpha, that is the ray influence, should contain the PI / numSamples value for final gathering
// for irradiance, see PBR p. 762.
float fgScale = 0.f;
if(pdf != 0.f && hasNonSpecular)
fgScale = MN_PI * (invNumFGRaysX * invNumFGRaysY);
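// With cosine-weighted hemisphere sampling (pdf = cos(theta)/pi, which dev_SampleDirectionLambertian
// is assumed to use here), the irradiance estimator (1/N) * sum(L_i * cos(theta_i) / pdf_i) reduces
// to (pi/N) * sum(L_i), which is exactly the pi/N factor computed above.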
// Avoid evaluation in case w_i and w_o lie in different hemispheres
// with respect to n_g. PBR p. 465 or VeachPhD, p. 153.
//if(dot(w_o, n_g) * dot(w_i, n_g) <= 0.f)
// fgScale = 0.f;
// Store the new ray.
outChunk.d_origins[tid] = shadingPts.d_ptInter[tid];
outChunk.d_dirs[tid] = make_float4(w_i);
outChunk.d_influences[tid] = make_float4(fgScale);
outChunk.d_pixels[tid] = shadingPts.d_pixels[tid];
}
}
//@}
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \fn extern "C" void RTUpdateRayGenKernels(const TriangleData& tris, const MaterialData& mats)
///
/// \brief Binds textures and sets constant memory variables.
///
/// \author Mathias Neumann
/// \date 13.02.2010
///
/// \param tris Triangle data for current scene.
/// \param mats Material data for current scene.
////////////////////////////////////////////////////////////////////////////////////////////////////
extern "C"
void RTUpdateRayGenKernels(const TriangleData& tris, const MaterialData& mats)
{
mncudaSafeCallNoSync(cudaMemcpyToSymbol("c_Materials", &mats.matProps, sizeof(MaterialProperties)));
cudaChannelFormatDesc cdUint = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindUnsigned);
tex_TriMatIdx.normalized = false;
mncudaSafeCallNoSync(cudaBindTexture(NULL, tex_TriMatIdx, tris.d_idxMaterial, cdUint, tris.numTris*sizeof(uint)));
}
/// Unbinds textures used for ray generation kernels.
extern "C"
void RTCleanupRayGenKernels()
{
mncudaSafeCallNoSync(cudaUnbindTexture(tex_TriMatIdx));
}
////////////////////////////////////////////////////////////////////////////////////////////////////
/// \WRAPPERS
////////////////////////////////////////////////////////////////////////////////////////////////////
//@{
/// Wraps kernel_genPrimaryRays() kernel call.
extern "C"
void KernelRTPrimaryKernel(CameraModel* pCamera,
uint idxSampleX, uint samplesPerPixelX, uint idxSampleY, uint samplesPerPixelY,
RayChunk& outChunk)
{
uint screenW = pCamera->GetScreenWidth();
uint screenH = pCamera->GetScreenHeight();
MNTransform cam2world = pCamera->GetCamera2World();
MNTransform raster2cam = pCamera->GetRaster2Camera();
// Move matrices to constant memory.
// WARNING: Cannot pass matrix[4][4] variables per parameter in kernel!
Matrix matCam2World;
for(uint i=0; i<4; i++)
for(uint j=0; j<4; j++)
matCam2World.elems[i][j] = cam2world.GetMatrix(i, j);
mncudaSafeCallNoSync(cudaMemcpyToSymbol("c_matCam2World", &matCam2World, sizeof(Matrix)));
Matrix matRaster2Cam;
for(uint i=0; i<4; i++)
for(uint j=0; j<4; j++)
matRaster2Cam.elems[i][j] = raster2cam.GetMatrix(i, j);
mncudaSafeCallNoSync(cudaMemcpyToSymbol("c_matRaster2Cam", &matRaster2Cam, sizeof(Matrix)));
uint numPixels = screenW*screenH;
dim3 blockSize = dim3(256, 1, 1);
dim3 gridSize = dim3(MNCUDA_DIVUP(numPixels, blockSize.x), 1);
MNCudaMT& mtw = MNCudaMT::GetInstance();
uint numRnd = mtw.GetAlignedCount(numPixels);
MNCudaMemory<float> d_randoms(2*numRnd);
mtw.Seed(rand());
mncudaSafeCallNoSync(mtw.Generate(d_randoms, 2*numRnd));
// Generate primary rays.
float invSamplesPerPixelX = ((samplesPerPixelX > 1) ? 1.f / float(samplesPerPixelX) : 1.f);
float invSamplesPerPixelY = ((samplesPerPixelY > 1) ? 1.f / float(samplesPerPixelY) : 1.f);
kernel_genPrimaryRays<<<gridSize, blockSize>>>(
screenW, screenH, (float)idxSampleX, invSamplesPerPixelX, (float)idxSampleY, invSamplesPerPixelY,
pCamera->GetClipHither(), pCamera->GetClipYon(),
d_randoms, d_randoms+numRnd, outChunk);
MNCUDA_CHECKERROR;
// Update chunk status.
outChunk.rekDepth = 0;
outChunk.numRays = numPixels;
}
/// Wraps kernel_genReflectedRays() kernel call.
extern "C"
void KernelRTReflectedKernel(RayChunk& chunkSrc, ShadingPoints& shadingPts,
TriangleData& triData, RayChunk& outChunk, uint* d_outIsValid)
{
dim3 blockSize = dim3(256, 1, 1);
dim3 gridSize = dim3(MNCUDA_DIVUP(chunkSrc.numRays, blockSize.x), 1, 1);
// Generate rays into out chunk.
kernel_genReflectedRays<<<gridSize, blockSize>>>(
chunkSrc, shadingPts, outChunk, d_outIsValid);
MNCUDA_CHECKERROR;
// Increase recursion depth.
outChunk.rekDepth = chunkSrc.rekDepth + 1;
outChunk.numRays = chunkSrc.numRays;
}
/// Wraps kernel_genTransmittedRays() kernel call.
extern "C"
void KernelRTTransmittedKernel(RayChunk& chunkSrc, ShadingPoints& shadingPts,
TriangleData& triData, RayChunk& outChunk, uint* d_outIsValid)
{
dim3 blockSize = dim3(256, 1, 1);
dim3 gridSize = dim3(MNCUDA_DIVUP(chunkSrc.numRays, blockSize.x), 1, 1);
// Generate rays into out chunk.
kernel_genTransmittedRays<<<gridSize, blockSize>>>(
chunkSrc, shadingPts, outChunk, d_outIsValid);
MNCUDA_CHECKERROR;
// Increase recursion depth.
outChunk.rekDepth = chunkSrc.rekDepth + 1;
outChunk.numRays = chunkSrc.numRays;
}
/// Wraps kernel_genFinalGatherRays() kernel call.
extern "C"
void KernelRTFinalGatherRays(const ShadingPoints& shadingPts, float4* d_clrDiffHit,
float* d_randoms1, float* d_randoms2,
uint idxFGRayX, uint numFGRaysX,
uint idxFGRayY, uint numFGRaysY,
RayChunk& outChunk)
{
dim3 blockSize = dim3(256, 1, 1);
dim3 gridSize = dim3(MNCUDA_DIVUP(shadingPts.numPoints, blockSize.x), 1, 1);
float invNumRaysX = ((numFGRaysX > 1) ? 1.f / float(numFGRaysX) : 1.f);
float invNumRaysY = ((numFGRaysY > 1) ? 1.f / float(numFGRaysY) : 1.f);
kernel_genFinalGatherRays<<<gridSize, blockSize>>>(shadingPts, d_clrDiffHit, d_randoms1, d_randoms2,
(float)idxFGRayX, invNumRaysX, (float)idxFGRayY, invNumRaysY, outChunk);
MNCUDA_CHECKERROR;
outChunk.rekDepth = 0;
outChunk.numRays = shadingPts.numPoints;
}
//@}
////////////////////////////////////////////////////////////////////////////////////////////////////
//////////////////////////////////////////////////////////////////////////////////////////////////// |
ff581dcee45ad8fd6e18b719a857a63d7c48b556.hip | // !!! This is a file automatically generated by hipify!!!
/*************************************************
** Accelereyes Training Day 1 **
** Vector Addition **
** **
** This program will add two vectors and store **
** the result in a third vector using the GPU **
*************************************************/
#include <iostream>
#include <vector>
#include "hip/hip_runtime.h"
#include "../common.h"
__global__ void add(int* a, int* b, int* c) {
// calculate global id
int index = blockIdx.x * blockDim.x + threadIdx.x;
// perform calculation
c[index] = a[index] + b[index];
}
int main(void) {
using namespace std;
long N = 1000 * 10;
size_t size = N * sizeof(int);
// initialize device pointers and allocate memory on the GPU
int *a_d, *b_d, *c_d;
hipMalloc(&a_d, size);
hipMalloc(&b_d, size);
hipMalloc(&c_d, size);
// initialize data on host
int *a_h = new int[N];
int *b_h = new int[N];
for (int i = 0; i < N; i++) {
a_h[i] = 1;
b_h[i] = 2;
}
// move host data to the GPU
hipMemcpy(a_d, a_h, size, hipMemcpyHostToDevice);
hipMemcpy(b_d, b_h, size, hipMemcpyHostToDevice);
// launch kernel
hipLaunchKernelGGL(( add), dim3(10), dim3(1000), 0, 0, a_d, b_d, c_d);
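// 10 blocks x 1000 threads launches exactly N = 10,000 threads, one per element,
// which is why the kernel above gets away without a bounds check on index.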
CUDA(hipPeekAtLastError());
CUDA(hipDeviceSynchronize());
// get the results from the GPU
int *c_h = new int[N];
hipMemcpy(c_h, c_d, size, hipMemcpyDeviceToHost);
// print results
for(int i = 0; i < N; ++i) {
cout << c_h[i] << ", ";
}
cout << "\n";
delete[] a_h;
delete[] b_h;
delete[] c_h;
hipFree(a_d);
hipFree(b_d);
hipFree(c_d);
return 0;
}
| ff581dcee45ad8fd6e18b719a857a63d7c48b556.cu | /*************************************************
** Accelereyes Training Day 1 **
** Vector Addition **
** **
** This program will add two vectors and store **
** the result in a third vector using the GPU **
*************************************************/
#include <iostream>
#include <vector>
#include "cuda.h"
#include "../common.h"
__global__ void add(int* a, int* b, int* c) {
// calculate global id
int index = blockIdx.x * blockDim.x + threadIdx.x;
// perform calculation
c[index] = a[index] + b[index];
}
int main(void) {
using namespace std;
long N = 1000 * 10;
size_t size = N * sizeof(int);
// initialize device pointers and allocate memory on the GPU
int *a_d, *b_d, *c_d;
cudaMalloc(&a_d, size);
cudaMalloc(&b_d, size);
cudaMalloc(&c_d, size);
// initialize data on host
int *a_h = new int[N];
int *b_h = new int[N];
for (int i = 0; i < N; i++) {
a_h[i] = 1;
b_h[i] = 2;
}
// move host data to the GPU
cudaMemcpy(a_d, a_h, size, cudaMemcpyHostToDevice);
cudaMemcpy(b_d, b_h, size, cudaMemcpyHostToDevice);
// launch kernel
add<<<10, 1000>>>(a_d, b_d, c_d);
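// 10 blocks x 1000 threads launches exactly N = 10,000 threads, one per element,
// which is why the kernel above gets away without a bounds check on index.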
CUDA(cudaPeekAtLastError());
CUDA(cudaDeviceSynchronize());
// get the results from the GPU
int *c_h = new int[N];
cudaMemcpy(c_h, c_d, size, cudaMemcpyDeviceToHost);
// print results
for(int i = 0; i < N; ++i) {
cout << c_h[i] << ", ";
}
cout << "\n";
delete[] a_h;
delete[] b_h;
delete[] c_h;
cudaFree(a_d);
cudaFree(b_d);
cudaFree(c_d);
return 0;
}
|
05bcbbf4fdfffd94ce617f1652b7890cd3565b72.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <string>
#include <vector>
#include <sys/time.h>
#include <sys/resource.h>
#include <errno.h>
#include <assert.h>
#include <limits.h>
#include <float.h>
#include "voigt.h"
// The standard test case sizes
#ifndef PROBLEM_SIZE
#define PROBLEM_SIZE 4096
#endif
using namespace std;
int main(int argc, char* argv[])
{
/* events and timing variables */
hipEvent_t kernel_b, kernel_e;
float kernel_elapsed;
hipEventCreate(&kernel_b);
hipEventCreate(&kernel_e);
int i=0;
int j=0;
float vals;
float step = 32.0/PROBLEM_SIZE;
/* check for CUDA devices on host system *
* and print device info if found */
int ndev;
int dev;
hipGetDeviceCount(&ndev);
struct hipDeviceProp_t deviceProp;
if (ndev == 0)
printf("No CUDA devices found.\n");
else if(ndev == 1)
printf("\nFound %d CUDA device:\n", ndev);
else
printf("\nFound %d CUDA devices:\n", ndev);
for(dev=0; dev<ndev; ++dev){
hipGetDeviceProperties(&deviceProp, dev);
printf(" CUDA Device %d - %s\n", dev, deviceProp.name);
printf(" Clock rate: %10.2f MHz\n", (float)deviceProp.clockRate/1000.0);
printf(" Global memory: %10d MB\n", deviceProp.totalGlobalMem/1048576);
printf(" Constant memory: %10d KB\n", deviceProp.totalConstMem/1024);
printf(" Shared memory per block: %10d KB\n", deviceProp.sharedMemPerBlock/1024);
printf(" Registers per block: %10d \n", deviceProp.regsPerBlock);
printf(" Warp Size: %10d \n", deviceProp.warpSize);
printf(" Max Threads per block: %10d \n", deviceProp.maxThreadsPerBlock);
printf(" Max Block Dims (X Y Z): %10d %5d %5d\n", deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Max Grid Dims (X Y Z): %10d %5d %5d\n", deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf("\n");
}
/* configure the problem decomposition */
int nthds; // threads per block
int ntblks_x; // blocks in x
int ntblks_y; // blocks in y
/* The three values below control the thread block / grid layout */
/* They must be set before the program can be compiled */
nthds = ; // # of threads in a block
ntblks_x = ; // # of blocks in the grid in X
ntblks_y = ; // # of blocks in the grid in Y
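/* One layout that would cover all PROBLEM_SIZE*PROBLEM_SIZE points (illustrative assumption */
/* only -- the correct values depend on how my_voigt() maps threads to points, which is not */
/* shown here): e.g. nthds = 256, ntblks_x = PROBLEM_SIZE/nthds, ntblks_y = PROBLEM_SIZE. */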
dim3 dimGrid(ntblks_x,ntblks_y);
dim3 dimBlock(nthds);
cout << "GPU Grid Decomposition:" << endl;
cout << " " << PROBLEM_SIZE*PROBLEM_SIZE << " total points " << endl;
cout << " " << ntblks_x << " thread blocks in X" << endl;
cout << " " << ntblks_y << " thread blocks in Y" << endl;
cout << " " << nthds << " threads per block" << endl;
/* allocate space on host and device for input and output data */
float *h_damp, *h_offs, *h_vval;
float *d_damp, *d_offs, *d_vval;
size_t memSize = PROBLEM_SIZE*PROBLEM_SIZE*sizeof(float);
/* allocate host memory */
h_damp = (float *) malloc(memSize);
h_offs = (float *) malloc(memSize);
h_vval = (float *) malloc(memSize);
/* allocate device memory */
hipMalloc((void**)&d_damp, memSize);
hipMalloc((void**)&d_offs, memSize);
hipMalloc((void**)&d_vval, memSize);
assert(FLT_EPSILON < step);
vector <float> row;
row.resize(PROBLEM_SIZE);
/* assign initial data values */
vals = 10.0;
for(i=0; i<PROBLEM_SIZE; ++i){
vals += step;
for(j=0; j<PROBLEM_SIZE; ++j){
h_damp[i*PROBLEM_SIZE + j] = vals;
h_offs[j*PROBLEM_SIZE + i] = vals;
}
}
/* transfer data CPU -> GPU */
hipMemcpy((void*) d_damp, (void*) h_damp, memSize, hipMemcpyHostToDevice);
hipMemcpy((void*) d_offs, (void*) h_offs, memSize, hipMemcpyHostToDevice);
/*** ---- main compute kernel ----- ***/
/*** this is where the magic happens ***/
hipEventRecord(kernel_b, 0);
hipLaunchKernelGGL(( my_voigt), dim3(dimGrid), dim3(dimBlock), 0, 0, d_damp, d_offs, d_vval);
hipEventRecord(kernel_e, 0);
hipEventSynchronize(kernel_e);
/* transfer data GPU -> CPU */
hipMemcpy((void*) h_vval, (void*) d_vval, memSize, hipMemcpyDeviceToHost);
/* print verification values */
cout << endl << "Verification values:"<<endl;
cout << "-------------------"<<endl;
for(i=PROBLEM_SIZE/2; i<PROBLEM_SIZE/2 + 5; i++){
for(j=0; j<2; j++){
cout << h_vval[i*PROBLEM_SIZE + j] << " ";
}
cout << endl;
}
cout << "-------------------"<<endl;
/* print information about elapsed time */
hipEventElapsedTime(&kernel_elapsed, kernel_b, kernel_e);
cout << "-----------------------------------------" << endl;
cout << "Elapsed times (msec): "<< endl;
cout << " - voigt kernel: " << kernel_elapsed << endl;
cout << "-----------------------------------------" << endl;
}
| 05bcbbf4fdfffd94ce617f1652b7890cd3565b72.cu | #include <iostream>
#include <string>
#include <vector>
#include <sys/time.h>
#include <sys/resource.h>
#include <errno.h>
#include <assert.h>
#include <limits.h>
#include <float.h>
#include "voigt.h"
// The standard test case sizes
#ifndef PROBLEM_SIZE
#define PROBLEM_SIZE 4096
#endif
using namespace std;
int main(int argc, char* argv[])
{
/* events and timing variables */
cudaEvent_t kernel_b, kernel_e;
float kernel_elapsed;
cudaEventCreate(&kernel_b);
cudaEventCreate(&kernel_e);
int i=0;
int j=0;
float vals;
float step = 32.0/PROBLEM_SIZE;
/* check for CUDA devices on host system *
* and print device info if found */
int ndev;
int dev;
cudaGetDeviceCount(&ndev);
struct cudaDeviceProp deviceProp;
if (ndev == 0)
printf("No CUDA devices found.\n");
else if(ndev == 1)
printf("\nFound %d CUDA device:\n", ndev);
else
printf("\nFound %d CUDA devices:\n", ndev);
for(dev=0; dev<ndev; ++dev){
cudaGetDeviceProperties(&deviceProp, dev);
printf(" CUDA Device %d - %s\n", dev, deviceProp.name);
printf(" Clock rate: %10.2f MHz\n", (float)deviceProp.clockRate/1000.0);
printf(" Global memory: %10d MB\n", deviceProp.totalGlobalMem/1048576);
printf(" Constant memory: %10d KB\n", deviceProp.totalConstMem/1024);
printf(" Shared memory per block: %10d KB\n", deviceProp.sharedMemPerBlock/1024);
printf(" Registers per block: %10d \n", deviceProp.regsPerBlock);
printf(" Warp Size: %10d \n", deviceProp.warpSize);
printf(" Max Threads per block: %10d \n", deviceProp.maxThreadsPerBlock);
printf(" Max Block Dims (X Y Z): %10d %5d %5d\n", deviceProp.maxThreadsDim[0],
deviceProp.maxThreadsDim[1],
deviceProp.maxThreadsDim[2]);
printf(" Max Grid Dims (X Y Z): %10d %5d %5d\n", deviceProp.maxGridSize[0],
deviceProp.maxGridSize[1],
deviceProp.maxGridSize[2]);
printf("\n");
}
/* configure the problem decomposition */
int nthds; // threads per block
int ntblks_x; // blocks in x
int ntblks_y; // blocks in y
/* The three values below control the thread block / grid layout */
/* They must be set before the program can be compiled */
nthds = ; // # of threads in a block
ntblks_x = ; // # of blocks in the grid in X
ntblks_y = ; // # of blocks in the grid in Y
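/* One layout that would cover all PROBLEM_SIZE*PROBLEM_SIZE points (illustrative assumption */
/* only -- the correct values depend on how my_voigt() maps threads to points, which is not */
/* shown here): e.g. nthds = 256, ntblks_x = PROBLEM_SIZE/nthds, ntblks_y = PROBLEM_SIZE. */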
dim3 dimGrid(ntblks_x,ntblks_y);
dim3 dimBlock(nthds);
cout << "GPU Grid Decomposition:" << endl;
cout << " " << PROBLEM_SIZE*PROBLEM_SIZE << " total points " << endl;
cout << " " << ntblks_x << " thread blocks in X" << endl;
cout << " " << ntblks_y << " thread blocks in Y" << endl;
cout << " " << nthds << " threads per block" << endl;
/* allocate space on host and device for input and output data */
float *h_damp, *h_offs, *h_vval;
float *d_damp, *d_offs, *d_vval;
size_t memSize = PROBLEM_SIZE*PROBLEM_SIZE*sizeof(float);
/* allocate host memory */
h_damp = (float *) malloc(memSize);
h_offs = (float *) malloc(memSize);
h_vval = (float *) malloc(memSize);
/* allocate device memory */
cudaMalloc((void**)&d_damp, memSize);
cudaMalloc((void**)&d_offs, memSize);
cudaMalloc((void**)&d_vval, memSize);
assert(FLT_EPSILON < step);
vector <float> row;
row.resize(PROBLEM_SIZE);
/* assign initial data values */
vals = 10.0;
for(i=0; i<PROBLEM_SIZE; ++i){
vals += step;
for(j=0; j<PROBLEM_SIZE; ++j){
h_damp[i*PROBLEM_SIZE + j] = vals;
h_offs[j*PROBLEM_SIZE + i] = vals;
}
}
/* transfer data CPU -> GPU */
cudaMemcpy((void*) d_damp, (void*) h_damp, memSize, cudaMemcpyHostToDevice);
cudaMemcpy((void*) d_offs, (void*) h_offs, memSize, cudaMemcpyHostToDevice);
/*** ---- main compute kernel ----- ***/
/*** this is where the magic happens ***/
cudaEventRecord(kernel_b, 0);
my_voigt<<<dimGrid, dimBlock>>>(d_damp, d_offs, d_vval);
cudaEventRecord(kernel_e, 0);
cudaEventSynchronize(kernel_e);
/* transfer data GPU -> CPU */
cudaMemcpy((void*) h_vval, (void*) d_vval, memSize, cudaMemcpyDeviceToHost);
/* print verification values */
cout << endl << "Verification values:"<<endl;
cout << "-------------------"<<endl;
for(i=PROBLEM_SIZE/2; i<PROBLEM_SIZE/2 + 5; i++){
for(j=0; j<2; j++){
cout << h_vval[i*PROBLEM_SIZE + j] << " ";
}
cout << endl;
}
cout << "-------------------"<<endl;
/* print information about elapsed time */
cudaEventElapsedTime(&kernel_elapsed, kernel_b, kernel_e);
cout << "-----------------------------------------" << endl;
cout << "Elapsed times (msec): "<< endl;
cout << " - voigt kernel: " << kernel_elapsed << endl;
cout << "-----------------------------------------" << endl;
}
|
dcb5929b31cfa6f98d6cf041c8df68d01d527550.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "layers/nms.h"
#include <string.h>
// --------------------------------------------------------------------------
// kernel code
// iou: compute size of overlap between two boxes
// nms_mask: given a set of boxes, compute overlap between all box pairs
// --------------------------------------------------------------------------
// "IoU = intersection area / union area" of two boxes A, B
// A, B: 4-dim array (x1, y1, x2, y2)
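// Example: A = (0, 0, 9, 9) and B = (5, 5, 14, 14) each cover 10x10 = 100 (coordinates are
// inclusive, hence the +1 below); they overlap in a 5x5 = 25 region, so
// IoU = 25 / (100 + 100 - 25) = 1/7, roughly 0.14.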
#ifdef GPU
__device__
#endif
static
real iou(const real A[], const real B[])
{
#ifndef GPU
if (A[0] > B[2] || A[1] > B[3] || A[2] < B[0] || A[3] < B[1]) {
return 0;
}
else {
#endif
// overlapped region (= box)
const real x1 = MAX(A[0], B[0]);
const real y1 = MAX(A[1], B[1]);
const real x2 = MIN(A[2], B[2]);
const real y2 = MIN(A[3], B[3]);
// intersection area
const real width = MAX(0.0f, x2 - x1 + 1.0f);
const real height = MAX(0.0f, y2 - y1 + 1.0f);
const real area = width * height;
// area of A, B
const real A_area = (A[2] - A[0] + 1.0f) * (A[3] - A[1] + 1.0f);
const real B_area = (B[2] - B[0] + 1.0f) * (B[3] - B[1] + 1.0f);
// IoU
return area / (A_area + B_area - area);
#ifndef GPU
}
#endif
}
// given box proposals, compute overlap between all box pairs
// (overlap = intersection area / union area)
// and then set mask-bit to 1 if a pair is significantly overlapped
// num_boxes: number of box proposals given
// boxes: "num_boxes x 5" array (x1, y1, x2, y2, score)
// nms_thresh: threshold for determining "significant overlap"
// if "intersection area / union area > nms_thresh",
// two boxes are thought of as significantly overlapped
// the all-pair computation (num_boxes x num_boxes) is done by
// divide-and-conquer:
// each GPU block (bj, bi) computes for "64 x 64" box pairs (j, i),
// j = bj * 64 + { 0, 1, ..., 63 }
// i = bi * 64 + { 0, 1, ..., 63 },
// and each "1 x 64" results is saved into a 64-bit mask
// mask: "num_boxes x num_blocks" array
// for mask[j][bi], "di-th bit = 1" means:
// box j is significantly overlapped with box i,
// where i = bi * 64 + di
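// Example: with num_boxes = 200, num_blocks = DIV_THEN_CEIL(200, 64) = 4; block (bi=1, bj=2)
// compares boxes j = 128..191 against boxes i = 64..127, and a set bit 5 in mask[130][1]
// means box 130 is significantly overlapped with box i = 1 * 64 + 5 = 69.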
#ifdef GPU
#define NMS_BLOCK_SIZE 64
typedef unsigned long long uint64;
__global__
static
void nms_mask_gpu(const real boxes[], uint64 mask[],
const int num_boxes, const real nms_thresh)
{
// block region
// j = j_start + { 0, ..., dj_end - 1 }
// i = i_start + { 0, ..., di_end - 1 }
const int i_start = blockIdx.x * NMS_BLOCK_SIZE;
const int di_end = MIN(num_boxes - i_start, NMS_BLOCK_SIZE);
const int j_start = blockIdx.y * NMS_BLOCK_SIZE;
const int dj_end = MIN(num_boxes - j_start, NMS_BLOCK_SIZE);
// copy all i-th boxes to GPU cache
// i = i_start + { 0, ..., di_end - 1 }
__shared__ real boxes_i[NMS_BLOCK_SIZE * 4];
{
const int di = threadIdx.x;
if (di < di_end) {
boxes_i[di * 4 + 0] = boxes[(i_start + di) * 5 + 0];
boxes_i[di * 4 + 1] = boxes[(i_start + di) * 5 + 1];
boxes_i[di * 4 + 2] = boxes[(i_start + di) * 5 + 2];
boxes_i[di * 4 + 3] = boxes[(i_start + di) * 5 + 3];
}
}
__syncthreads();
// given j = j_start + dj,
// check whether box i is significantly overlapped with box j
// (i.e., IoU(box j, box i) > threshold)
// for all i = i_start + { 0, ..., di_end - 1 } except for i == j
{
const int dj = threadIdx.x;
if (dj < dj_end) {
// box j
const real* const box_j = boxes + (j_start + dj) * 5;
// mask for significant overlap
// if IoU(box j, box i) > threshold, di-th bit = 1
uint64 mask_j = 0;
// check for all i = i_start + { 0, ..., di_end - 1 }
// except for i == j
const int di_start = (i_start == j_start) ? (dj + 1) : 0;
for (int di = di_start; di < di_end; ++di) {
// box i
const real* const box_i = boxes_i + di * 4;
// if IoU(box j, box i) > threshold, di-th bit = 1
if (iou(box_j, box_i) > nms_thresh) {
mask_j |= 1ULL << di;
}
}
// mask: "num_boxes x num_blocks" array
// for mask[j][bi], "di-th bit = 1" means:
// box j is significantly overlapped with box i = i_start + di,
// where i_start = bi * block_size
{
const int num_blocks = DIV_THEN_CEIL(num_boxes, NMS_BLOCK_SIZE);
const int bi = blockIdx.x;
mask[(j_start + dj) * num_blocks + bi] = mask_j;
}
} // endif dj < dj_end
}
}
#else
#endif
// --------------------------------------------------------------------------
// auxiliary data structure
// --------------------------------------------------------------------------
// auxiliary data structure for NMS operation
#ifdef GPU
typedef struct NMSAuxData_
{
uint64* mask_cpu;
uint64* dead_bit_cpu;
uint64* mask_gpu;
} NMSAuxData;
#else
typedef struct NMSAuxData_
{
unsigned char* is_dead;
} NMSAuxData;
#endif
// auxiliary data initializer
#ifdef GPU
static
void malloc_nms_aux_data_gpu(NMSAuxData* const aux_data,
const int num_boxes,
long int* const p_space_cpu,
long int* const p_space_gpu)
{
const int num_blocks = DIV_THEN_CEIL(num_boxes, NMS_BLOCK_SIZE);
aux_data->mask_cpu
= (uint64*)calloc(num_boxes * num_blocks, sizeof(uint64));
aux_data->dead_bit_cpu = (uint64*)calloc(num_blocks, sizeof(uint64));
hipMalloc(&aux_data->mask_gpu, num_boxes * num_blocks * sizeof(uint64));
hipMemset(aux_data->mask_gpu, 0, num_boxes * num_blocks * sizeof(uint64));
*p_space_cpu = num_boxes * num_blocks * sizeof(uint64)
+ num_blocks * sizeof(uint64);
*p_space_gpu = num_boxes * num_blocks * sizeof(uint64);
}
#else
static
void malloc_nms_aux_data_cpu(NMSAuxData* const aux_data,
const int num_boxes,
long int* const p_space_cpu,
long int* const p_space_gpu)
{
aux_data->is_dead
= (unsigned char*)calloc(num_boxes, sizeof(unsigned char));
*p_space_cpu = num_boxes * sizeof(unsigned char);
*p_space_gpu = 0;
}
#endif
// auxiliary data finalizer
#ifdef GPU
static
void free_nms_aux_data_gpu(NMSAuxData* const aux_data)
{
free(aux_data->mask_cpu);
free(aux_data->dead_bit_cpu);
hipFree(aux_data->mask_gpu);
memset(aux_data, 0, sizeof(NMSAuxData));
}
#else
static
void free_nms_aux_data_cpu(NMSAuxData* const aux_data)
{
free(aux_data->is_dead);
memset(aux_data, 0, sizeof(NMSAuxData));
}
#endif
// --------------------------------------------------------------------------
// operator code
// --------------------------------------------------------------------------
// given box proposals (sorted in descending order of their scores),
// discard a box if it is significantly overlapped with
// one or more previous (= scored higher) boxes
// num_boxes: number of box proposals given
// boxes: "num_boxes x 5" array (x1, y1, x2, y2, score)
// sorted in descending order of scores
// aux_data: auxiliary data for NMS operation
// num_out: number of remaining boxes
// index_out_cpu: "num_out x 1" array
// indices of remaining boxes
// allocated at main memory
// base_index: a constant added to index_out_cpu, usually 0
// index_out_cpu[i] = base_index + actual index in boxes
// nms_thresh: threshold for determining "significant overlap"
// if "intersection area / union area > nms_thresh",
// two boxes are thought of as significantly overlapped
// bbox_vote: whether bounding-box voting is used (= 1) or not (= 0)
// vote_thresh: threshold for selecting overlapped boxes
// which participate in bounding-box voting
#ifdef GPU
void nms(const int num_boxes, real boxes[], void* const aux_data,
int* const num_out, int index_out_cpu[],
const int base_index, const real nms_thresh, const int max_num_out,
const int bbox_vote, const real vote_thresh)
{
const int num_blocks = DIV_THEN_CEIL(num_boxes, NMS_BLOCK_SIZE);
uint64* const mask_cpu = ((NMSAuxData*)aux_data)->mask_cpu;
{
uint64* const mask_gpu = ((NMSAuxData*)aux_data)->mask_gpu;
const dim3 blocks(num_blocks, num_blocks);
// find all significantly-overlapped pairs of boxes
hipLaunchKernelGGL(( nms_mask_gpu), dim3(blocks), dim3(NMS_BLOCK_SIZE), 0, 0,
boxes, mask_gpu, num_boxes, nms_thresh);
hipMemcpyAsync(mask_cpu, mask_gpu,
sizeof(uint64) * num_boxes * num_blocks,
hipMemcpyDeviceToHost);
}
// discard i-th box if it is significantly overlapped with
// one or more previous (= scored higher) boxes
{
int num_selected = 0;
uint64* const dead_bit_cpu = ((NMSAuxData*)aux_data)->dead_bit_cpu;
memset(dead_bit_cpu, 0, num_blocks * sizeof(uint64));
for (int i = 0; i < num_boxes; ++i) {
const int nblock = i / NMS_BLOCK_SIZE;
const int inblock = i % NMS_BLOCK_SIZE;
if (!(dead_bit_cpu[nblock] & (1ULL << inblock))) {
index_out_cpu[num_selected++] = base_index + i;
const uint64* const mask_i = mask_cpu + i * num_blocks;
for (int j = nblock; j < num_blocks; ++j) {
dead_bit_cpu[j] |= mask_i[j];
}
if (num_selected == max_num_out) {
break;
}
}
}
*num_out = num_selected;
}
}
#else
void nms(const int num_boxes, real boxes[], void* const aux_data,
int* const num_out, int index_out_cpu[],
const int base_index, const real nms_thresh, const int max_num_out,
const int bbox_vote, const real vote_thresh)
{
unsigned char* const is_dead = ((NMSAuxData*)aux_data)->is_dead;
int num_selected = 0;
memset(is_dead, 0, num_boxes * sizeof(unsigned char));
for (int i = 0; i < num_boxes; ++i) {
if (is_dead[i]) {
continue;
}
index_out_cpu[num_selected++] = base_index + i;
if (bbox_vote) {
real sum_score = boxes[i * 5 + 4];
real sum_box[4] = {
sum_score * boxes[i * 5 + 0], sum_score * boxes[i * 5 + 1],
sum_score * boxes[i * 5 + 2], sum_score * boxes[i * 5 + 3]
};
for (int j = 0; j < i; ++j) {
if (is_dead[j] && iou(&boxes[i * 5], &boxes[j * 5]) > vote_thresh) {
real score = boxes[j * 5 + 4];
sum_box[0] += score * boxes[j * 5 + 0];
sum_box[1] += score * boxes[j * 5 + 1];
sum_box[2] += score * boxes[j * 5 + 2];
sum_box[3] += score * boxes[j * 5 + 3];
sum_score += score;
}
}
for (int j = i + 1; j < num_boxes; ++j) {
real iou_val = iou(&boxes[i * 5], &boxes[j * 5]);
if (!is_dead[j] && iou_val > nms_thresh) {
is_dead[j] = 1;
}
if (iou_val > vote_thresh) {
real score = boxes[j * 5 + 4];
sum_box[0] += score * boxes[j * 5 + 0];
sum_box[1] += score * boxes[j * 5 + 1];
sum_box[2] += score * boxes[j * 5 + 2];
sum_box[3] += score * boxes[j * 5 + 3];
sum_score += score;
}
}
boxes[i * 5 + 0] = sum_box[0] / sum_score;
boxes[i * 5 + 1] = sum_box[1] / sum_score;
boxes[i * 5 + 2] = sum_box[2] / sum_score;
boxes[i * 5 + 3] = sum_box[3] / sum_score;
}
else {
for (int j = i + 1; j < num_boxes; ++j) {
if (!is_dead[j] && iou(&boxes[i * 5], &boxes[j * 5]) > nms_thresh) {
is_dead[j] = 1;
}
}
}
if (num_selected == max_num_out) {
break;
}
}
*num_out = num_selected;
}
#endif
// --------------------------------------------------------------------------
// functions for layer-wise operators that use NMS operation
// --------------------------------------------------------------------------
void malloc_nms_aux_data(void** const p_aux_data,
int num_boxes,
long int* const p_space_cpu,
long int* const p_space_gpu)
{
long int space_cpu, space_gpu;
*p_aux_data = (void*)malloc(sizeof(NMSAuxData));
#ifdef GPU
malloc_nms_aux_data_gpu((NMSAuxData*)(*p_aux_data), num_boxes,
&space_cpu, &space_gpu);
#else
malloc_nms_aux_data_cpu((NMSAuxData*)(*p_aux_data), num_boxes,
&space_cpu, &space_gpu);
#endif
*p_space_cpu = space_cpu + sizeof(NMSAuxData);
*p_space_gpu = space_gpu;
}
void free_nms_aux_data(void* const aux_data)
{
#ifdef GPU
free_nms_aux_data_gpu((NMSAuxData*)aux_data);
#else
free_nms_aux_data_cpu((NMSAuxData*)aux_data);
#endif
free(aux_data);
}
| dcb5929b31cfa6f98d6cf041c8df68d01d527550.cu | #include "layers/nms.h"
#include <string.h>
// --------------------------------------------------------------------------
// kernel code
// iou: compute size of overlap between two boxes
// nms_mask: given a set of boxes, compute overlap between all box pairs
// --------------------------------------------------------------------------
// "IoU = intersection area / union area" of two boxes A, B
// A, B: 4-dim array (x1, y1, x2, y2)
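// Example: A = (0, 0, 9, 9) and B = (5, 5, 14, 14) each cover 10x10 = 100 (coordinates are
// inclusive, hence the +1 below); they overlap in a 5x5 = 25 region, so
// IoU = 25 / (100 + 100 - 25) = 1/7, roughly 0.14.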
#ifdef GPU
__device__
#endif
static
real iou(const real A[], const real B[])
{
#ifndef GPU
if (A[0] > B[2] || A[1] > B[3] || A[2] < B[0] || A[3] < B[1]) {
return 0;
}
else {
#endif
// overlapped region (= box)
const real x1 = MAX(A[0], B[0]);
const real y1 = MAX(A[1], B[1]);
const real x2 = MIN(A[2], B[2]);
const real y2 = MIN(A[3], B[3]);
// intersection area
const real width = MAX(0.0f, x2 - x1 + 1.0f);
const real height = MAX(0.0f, y2 - y1 + 1.0f);
const real area = width * height;
// area of A, B
const real A_area = (A[2] - A[0] + 1.0f) * (A[3] - A[1] + 1.0f);
const real B_area = (B[2] - B[0] + 1.0f) * (B[3] - B[1] + 1.0f);
// IoU
return area / (A_area + B_area - area);
#ifndef GPU
}
#endif
}
// given box proposals, compute overlap between all box pairs
// (overlap = intersection area / union area)
// and then set mask-bit to 1 if a pair is significantly overlapped
// num_boxes: number of box proposals given
// boxes: "num_boxes x 5" array (x1, y1, x2, y2, score)
// nms_thresh: threshold for determining "significant overlap"
// if "intersection area / union area > nms_thresh",
// two boxes are thought of as significantly overlapped
// the all-pair computation (num_boxes x num_boxes) is done by
// divide-and-conquer:
// each GPU block (bj, bi) computes for "64 x 64" box pairs (j, i),
// j = bj * 64 + { 0, 1, ..., 63 }
// i = bi * 64 + { 0, 1, ..., 63 },
// and each "1 x 64" results is saved into a 64-bit mask
// mask: "num_boxes x num_blocks" array
// for mask[j][bi], "di-th bit = 1" means:
// box j is significantly overlapped with box i,
// where i = bi * 64 + di
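// Example: with num_boxes = 200, num_blocks = DIV_THEN_CEIL(200, 64) = 4; block (bi=1, bj=2)
// compares boxes j = 128..191 against boxes i = 64..127, and a set bit 5 in mask[130][1]
// means box 130 is significantly overlapped with box i = 1 * 64 + 5 = 69.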
#ifdef GPU
#define NMS_BLOCK_SIZE 64
typedef unsigned long long uint64;
__global__
static
void nms_mask_gpu(const real boxes[], uint64 mask[],
const int num_boxes, const real nms_thresh)
{
// block region
// j = j_start + { 0, ..., dj_end - 1 }
// i = i_start + { 0, ..., di_end - 1 }
const int i_start = blockIdx.x * NMS_BLOCK_SIZE;
const int di_end = MIN(num_boxes - i_start, NMS_BLOCK_SIZE);
const int j_start = blockIdx.y * NMS_BLOCK_SIZE;
const int dj_end = MIN(num_boxes - j_start, NMS_BLOCK_SIZE);
// copy all i-th boxes to GPU cache
// i = i_start + { 0, ..., di_end - 1 }
__shared__ real boxes_i[NMS_BLOCK_SIZE * 4];
{
const int di = threadIdx.x;
if (di < di_end) {
boxes_i[di * 4 + 0] = boxes[(i_start + di) * 5 + 0];
boxes_i[di * 4 + 1] = boxes[(i_start + di) * 5 + 1];
boxes_i[di * 4 + 2] = boxes[(i_start + di) * 5 + 2];
boxes_i[di * 4 + 3] = boxes[(i_start + di) * 5 + 3];
}
}
__syncthreads();
// given j = j_start + dj,
// check whether box i is significantly overlapped with box j
// (i.e., IoU(box j, box i) > threshold)
// for all i = i_start + { 0, ..., di_end - 1 } except for i == j
{
const int dj = threadIdx.x;
if (dj < dj_end) {
// box j
const real* const box_j = boxes + (j_start + dj) * 5;
// mask for significant overlap
// if IoU(box j, box i) > threshold, di-th bit = 1
uint64 mask_j = 0;
// check for all i = i_start + { 0, ..., di_end - 1 }
// except for i == j
const int di_start = (i_start == j_start) ? (dj + 1) : 0;
for (int di = di_start; di < di_end; ++di) {
// box i
const real* const box_i = boxes_i + di * 4;
// if IoU(box j, box i) > threshold, di-th bit = 1
if (iou(box_j, box_i) > nms_thresh) {
mask_j |= 1ULL << di;
}
}
// mask: "num_boxes x num_blocks" array
// for mask[j][bi], "di-th bit = 1" means:
// box j is significantly overlapped with box i = i_start + di,
// where i_start = bi * block_size
{
const int num_blocks = DIV_THEN_CEIL(num_boxes, NMS_BLOCK_SIZE);
const int bi = blockIdx.x;
mask[(j_start + dj) * num_blocks + bi] = mask_j;
}
} // endif dj < dj_end
}
}
#else
#endif
// --------------------------------------------------------------------------
// auxiliary data structure
// --------------------------------------------------------------------------
// auxiliary data structure for NMS operation
#ifdef GPU
typedef struct NMSAuxData_
{
uint64* mask_cpu;
uint64* dead_bit_cpu;
uint64* mask_gpu;
} NMSAuxData;
#else
typedef struct NMSAuxData_
{
unsigned char* is_dead;
} NMSAuxData;
#endif
// auxiliary data initializer
#ifdef GPU
static
void malloc_nms_aux_data_gpu(NMSAuxData* const aux_data,
const int num_boxes,
long int* const p_space_cpu,
long int* const p_space_gpu)
{
const int num_blocks = DIV_THEN_CEIL(num_boxes, NMS_BLOCK_SIZE);
aux_data->mask_cpu
= (uint64*)calloc(num_boxes * num_blocks, sizeof(uint64));
aux_data->dead_bit_cpu = (uint64*)calloc(num_blocks, sizeof(uint64));
cudaMalloc(&aux_data->mask_gpu, num_boxes * num_blocks * sizeof(uint64));
cudaMemset(aux_data->mask_gpu, 0, num_boxes * num_blocks * sizeof(uint64));
*p_space_cpu = num_boxes * num_blocks * sizeof(uint64)
+ num_blocks * sizeof(uint64);
*p_space_gpu = num_boxes * num_blocks * sizeof(uint64);
}
#else
static
void malloc_nms_aux_data_cpu(NMSAuxData* const aux_data,
const int num_boxes,
long int* const p_space_cpu,
long int* const p_space_gpu)
{
aux_data->is_dead
= (unsigned char*)calloc(num_boxes, sizeof(unsigned char));
*p_space_cpu = num_boxes * sizeof(unsigned char);
*p_space_gpu = 0;
}
#endif
// auxiliary data finalizer
#ifdef GPU
static
void free_nms_aux_data_gpu(NMSAuxData* const aux_data)
{
free(aux_data->mask_cpu);
free(aux_data->dead_bit_cpu);
cudaFree(aux_data->mask_gpu);
memset(aux_data, 0, sizeof(NMSAuxData));
}
#else
static
void free_nms_aux_data_cpu(NMSAuxData* const aux_data)
{
free(aux_data->is_dead);
memset(aux_data, 0, sizeof(NMSAuxData));
}
#endif
// --------------------------------------------------------------------------
// operator code
// --------------------------------------------------------------------------
// given box proposals (sorted in descending order of their scores),
// discard a box if it is significantly overlapped with
// one or more previous (= scored higher) boxes
// num_boxes: number of box proposals given
// boxes: "num_boxes x 5" array (x1, y1, x2, y2, score)
// sorted in descending order of scores
// aux_data: auxiliary data for NMS operation
// num_out: number of remaining boxes
// index_out_cpu: "num_out x 1" array
// indices of remaining boxes
// allocated at main memory
// base_index: a constant added to index_out_cpu, usually 0
// index_out_cpu[i] = base_index + actual index in boxes
// nms_thresh: threshold for determining "significant overlap"
// if "intersection area / union area > nms_thresh",
// two boxes are thought of as significantly overlapped
// bbox_vote: whether bounding-box voting is used (= 1) or not (= 0)
// vote_thresh: threshold for selecting overlapped boxes
// which participate in bounding-box voting
#ifdef GPU
void nms(const int num_boxes, real boxes[], void* const aux_data,
int* const num_out, int index_out_cpu[],
const int base_index, const real nms_thresh, const int max_num_out,
const int bbox_vote, const real vote_thresh)
{
const int num_blocks = DIV_THEN_CEIL(num_boxes, NMS_BLOCK_SIZE);
uint64* const mask_cpu = ((NMSAuxData*)aux_data)->mask_cpu;
{
uint64* const mask_gpu = ((NMSAuxData*)aux_data)->mask_gpu;
const dim3 blocks(num_blocks, num_blocks);
// find all significantly-overlapped pairs of boxes
nms_mask_gpu<<<blocks, NMS_BLOCK_SIZE>>>(
boxes, mask_gpu, num_boxes, nms_thresh);
cudaMemcpyAsync(mask_cpu, mask_gpu,
sizeof(uint64) * num_boxes * num_blocks,
cudaMemcpyDeviceToHost);
}
// discard i-th box if it is significantly overlapped with
// one or more previous (= scored higher) boxes
{
int num_selected = 0;
uint64* const dead_bit_cpu = ((NMSAuxData*)aux_data)->dead_bit_cpu;
memset(dead_bit_cpu, 0, num_blocks * sizeof(uint64));
for (int i = 0; i < num_boxes; ++i) {
const int nblock = i / NMS_BLOCK_SIZE;
const int inblock = i % NMS_BLOCK_SIZE;
if (!(dead_bit_cpu[nblock] & (1ULL << inblock))) {
index_out_cpu[num_selected++] = base_index + i;
const uint64* const mask_i = mask_cpu + i * num_blocks;
for (int j = nblock; j < num_blocks; ++j) {
dead_bit_cpu[j] |= mask_i[j];
}
if (num_selected == max_num_out) {
break;
}
}
}
*num_out = num_selected;
}
}
#else
void nms(const int num_boxes, real boxes[], void* const aux_data,
int* const num_out, int index_out_cpu[],
const int base_index, const real nms_thresh, const int max_num_out,
const int bbox_vote, const real vote_thresh)
{
unsigned char* const is_dead = ((NMSAuxData*)aux_data)->is_dead;
int num_selected = 0;
memset(is_dead, 0, num_boxes * sizeof(unsigned char));
for (int i = 0; i < num_boxes; ++i) {
if (is_dead[i]) {
continue;
}
index_out_cpu[num_selected++] = base_index + i;
if (bbox_vote) {
real sum_score = boxes[i * 5 + 4];
real sum_box[4] = {
sum_score * boxes[i * 5 + 0], sum_score * boxes[i * 5 + 1],
sum_score * boxes[i * 5 + 2], sum_score * boxes[i * 5 + 3]
};
for (int j = 0; j < i; ++j) {
if (is_dead[j] && iou(&boxes[i * 5], &boxes[j * 5]) > vote_thresh) {
real score = boxes[j * 5 + 4];
sum_box[0] += score * boxes[j * 5 + 0];
sum_box[1] += score * boxes[j * 5 + 1];
sum_box[2] += score * boxes[j * 5 + 2];
sum_box[3] += score * boxes[j * 5 + 3];
sum_score += score;
}
}
for (int j = i + 1; j < num_boxes; ++j) {
real iou_val = iou(&boxes[i * 5], &boxes[j * 5]);
if (!is_dead[j] && iou_val > nms_thresh) {
is_dead[j] = 1;
}
if (iou_val > vote_thresh) {
real score = boxes[j * 5 + 4];
sum_box[0] += score * boxes[j * 5 + 0];
sum_box[1] += score * boxes[j * 5 + 1];
sum_box[2] += score * boxes[j * 5 + 2];
sum_box[3] += score * boxes[j * 5 + 3];
sum_score += score;
}
}
boxes[i * 5 + 0] = sum_box[0] / sum_score;
boxes[i * 5 + 1] = sum_box[1] / sum_score;
boxes[i * 5 + 2] = sum_box[2] / sum_score;
boxes[i * 5 + 3] = sum_box[3] / sum_score;
}
else {
for (int j = i + 1; j < num_boxes; ++j) {
if (!is_dead[j] && iou(&boxes[i * 5], &boxes[j * 5]) > nms_thresh) {
is_dead[j] = 1;
}
}
}
if (num_selected == max_num_out) {
break;
}
}
*num_out = num_selected;
}
#endif
// --------------------------------------------------------------------------
// functions for layer-wise operators that use NMS operation
// --------------------------------------------------------------------------
void malloc_nms_aux_data(void** const p_aux_data,
int num_boxes,
long int* const p_space_cpu,
long int* const p_space_gpu)
{
long int space_cpu, space_gpu;
*p_aux_data = (void*)malloc(sizeof(NMSAuxData));
#ifdef GPU
malloc_nms_aux_data_gpu((NMSAuxData*)(*p_aux_data), num_boxes,
&space_cpu, &space_gpu);
#else
malloc_nms_aux_data_cpu((NMSAuxData*)(*p_aux_data), num_boxes,
&space_cpu, &space_gpu);
#endif
*p_space_cpu = space_cpu + sizeof(NMSAuxData);
*p_space_gpu = space_gpu;
}
void free_nms_aux_data(void* const aux_data)
{
#ifdef GPU
free_nms_aux_data_gpu((NMSAuxData*)aux_data);
#else
free_nms_aux_data_cpu((NMSAuxData*)aux_data);
#endif
free(aux_data);
}
|
2b4812221730e3ce6b4e75d68aca683d24376993.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "yeti_rank_pointwise.cuh"
#include "radix_sort_block.cuh"
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
#include <catboost/cuda/cuda_util/kernel/random_gen.cuh>
#include <contrib/libs/cub/cub/block/block_radix_sort.cuh>
namespace NKernel
{
__global__ void RemoveQueryMeansImpl(const int* qids, int size, const float* queryMeans,
float* approx)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < size) {
approx[tid] -= queryMeans[qids[tid]];
}
}
void RemoveQueryMeans(const int* qids, int size, const float* queryMeans,
float* approx, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (size + blockSize - 1) / blockSize;
if (numBlocks > 0) {
hipLaunchKernelGGL(( RemoveQueryMeansImpl), dim3(numBlocks), dim3(blockSize), 0, stream , qids, size, queryMeans, approx);
}
}
template <ui32 BLOCK_SIZE>
__device__ void YetiRankGradientSingleGroup(ui32 seed,
ui32 bootstrapIter,
const float* __restrict__ approx, const float* __restrict__ relev,
const int* __restrict__ qids, int size,
float* approxes,
volatile float* __restrict__ targetDst,
volatile float* __restrict__ weightDst) {
const int N = 4;
ui32 srcIndex[N]; //contains offset and qid of point
i16 queryBegin[N];
__shared__ float relevs[BLOCK_SIZE * 4]; // 4K
{
{
int* queryIds = (int*) approxes;
const int firstQid = __ldg(qids);
for (int k = 0; k < N; k++) {
int offset = threadIdx.x + k * BLOCK_SIZE;
int qid = offset < size ? qids[offset] : qids[size - 1] + 1;
qid -= firstQid;
queryIds[offset] = qid;
srcIndex[k] = offset;
                    srcIndex[k] |= qid << 10; //low 10 bits: point offset within the group; local qid stored above bit 9
}
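                // Illustrative note (not in the original source): the launcher uses
                // blockSize = 256, so a group spans at most 4 * BLOCK_SIZE = 1024 points
                // and the offset fits in the low 10 bits; srcIndex decodes as
                //   offset = srcIndex & 1023;  local qid = srcIndex >> 10;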
int* queryOffsets = (int*) relevs;
queryOffsets[threadIdx.x] = size;
__syncthreads();
for (int k = 0; k < N; k++) {
const int offset = threadIdx.x + k * BLOCK_SIZE; //point id
if (!offset || queryIds[offset] != queryIds[offset - 1])
{
const int qid = queryIds[offset];
queryOffsets[qid] = offset;
}
}
__syncthreads();
for (int k = 0; k < N; k++) {
const int offset = threadIdx.x + k * BLOCK_SIZE; //point id
int qid = queryIds[offset];
queryBegin[k] = queryOffsets[qid];
}
__syncthreads();
}
for (int k = 0; k < 4; k++) {
const int offset = threadIdx.x + k * BLOCK_SIZE;
relevs[offset] = offset < size ? relev[offset] : 1000.0f;
approxes[offset] = offset < size ? __expf(min(approx[offset], 70.0f)) : 1000.0f;
}
}
__syncthreads();
__shared__ ui32 indices[BLOCK_SIZE * N];
for (int t = 0; t < bootstrapIter; t++)
{
ui32 key[N];
ui32 idx[N] = {srcIndex[0], srcIndex[1], srcIndex[2], srcIndex[3]};
for (int k = 0; k < N; k++)
{
float val = (idx[k] & 1023) < size ? approxes[idx[k] & 1023] : -1000.0f;
const float uni = NextUniformFloat32(&seed);
val *= uni / (1.000001f - uni);
key[k] = __float_as_int(val);
key[k] ^= (key[k] & 0x80000000) ? 0xffffffff : 0x80000000;
}
{
RadixSortSingleBlock4<BLOCK_SIZE, false, 0, 32>((uint4&)key, (uint4&)idx, indices);
RadixSortSingleBlock4<BLOCK_SIZE, true, 10, 10>((uint4&)idx, indices);
}
            //after both sorts, idx[k] (not key[k]) packs the document that lands at position (threadIdx.x + k * BLOCK_SIZE - queryOffset) within query idx[k] >> 10; its original offset sits in the low 10 bits
for (int k = 0; k < N; k++) {
const int offset = threadIdx.x + k * BLOCK_SIZE;
indices[offset] = idx[k] & 1023;
}
__syncthreads();
for (int k = 0; k < N; k++) {
const int offset = threadIdx.x + k * BLOCK_SIZE;
const int idx1 = offset != queryBegin[k] ? (int)indices[offset - 1] : -1;
const int idx2 = (int)indices[offset];
const float relev1 = idx1 != -1 ? relevs[idx1] : 0;
const float relev2 = relevs[idx2];
const float approx1 = idx1 != -1 ? approxes[idx1] : 0;
const float approx2 = approxes[idx2];
const float decaySpeed = 0.99f;
const float magicConst = 10; //to make learning rate more comparable with pair classification
const float decay = magicConst * powf(decaySpeed, offset - queryBegin[k] - 1);
const float pairWeight = decay * fabs(relev1 - relev2) / bootstrapIter;
const float ll = pairWeight * (relev1 > relev2 ? approx2 : -approx1) / (approx2 + approx1); //
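                // Worked reading of the formulas above (illustrative, not in the original
                // source): for the adjacent pair at ranked positions p-1 and p within a
                // query (p = offset - queryBegin[k]), decay = 10 * 0.99^(p-1); pairWeight
                // spreads decay * |relev1 - relev2| over the bootstrapIter permutations,
                // and ll is the signed pairwise update built from the exponentiated
                // approxes of the two documents.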
if (idx1 != -1 && offset < size) {
weightDst[idx1] += pairWeight;
targetDst[idx1] += ll;
}
__syncthreads();
if (idx1 != -1 && offset < size) {
weightDst[idx2] += pairWeight;
targetDst[idx2] += -ll;
}
__syncthreads();
}
__syncthreads();
}
};
template<int BLOCK_SIZE>
__global__ void YetiRankGradientImpl(int seed,
ui32 bootstrapIter,
const ui32* queryOffsets,
volatile int* qidCursor,
ui32 qOffsetsBias, ui32 qCount,
const int* qids,
const float* approx,
const float* relev,
ui32 size,
float* targetDst,
float* weightDst) {
__shared__ float approxes[BLOCK_SIZE * 4]; // 4K
while (true)
{
int taskQid = 0;
int* sharedQid = (int*) approxes;
int offset = 0;
int nextTaskOffset = 0;
if (threadIdx.x == 0) {
taskQid = qidCursor[0];
while (true)
{
if (taskQid >= qCount) {
break;
}
offset = queryOffsets[taskQid] - qOffsetsBias;
nextTaskOffset = min(offset + 4 * BLOCK_SIZE, size);
int nextTaskQid = nextTaskOffset < size ? qids[nextTaskOffset] : qCount;
int oldQid = atomicCAS(const_cast<int*>(qidCursor), taskQid, nextTaskQid);
if (oldQid == taskQid) {
break;
} else {
taskQid = oldQid;
}
}
}
if (threadIdx.x == 0) {
sharedQid[0] = taskQid;
sharedQid[1] = offset;
sharedQid[2] = nextTaskOffset;
}
__syncthreads();
taskQid = sharedQid[0];
offset = sharedQid[1];
nextTaskOffset = sharedQid[2];
__syncthreads();
if (taskQid >= qCount) {
return;
}
//statisticians will complain :) but we don't need high-quality random generators
ui32 taskSeed = 127 * taskQid + 16807 * threadIdx.x;
AdvanceSeed32(&taskSeed);
taskSeed += seed;
AdvanceSeed32(&taskSeed);
YetiRankGradientSingleGroup<BLOCK_SIZE>(taskSeed,
bootstrapIter,
approx + offset,
relev + offset,
qids + offset,
nextTaskOffset - offset,
approxes,
targetDst + offset,
weightDst + offset);
__syncthreads();
}
}
void YetiRankGradient(ui64 seed,
ui32 bootstrapIter,
const ui32* queryOffsets,
int* qidCursor,
ui32 qOffsetsBias,
ui32 qCount,
const int* qids,
const float* approx,
const float* relev,
ui32 size,
float* targetDst,
float* weightDst,
TCudaStream stream)
{
const ui32 maxBlocksPerSm = 4;
const ui32 smCount = TArchProps::SMCount();
const int blockSize = 256;
FillBuffer(targetDst, 0.0f, size, stream);
FillBuffer(weightDst, 0.0f, size, stream);
FillBuffer(qidCursor, 0, 1, stream);
int cudaSeed = seed + (seed >> 32);
hipLaunchKernelGGL(( YetiRankGradientImpl<blockSize>), dim3(maxBlocksPerSm * smCount), dim3(blockSize), 0, stream, cudaSeed,
bootstrapIter, queryOffsets,
qidCursor, qOffsetsBias, qCount, qids,
approx, relev, size, targetDst, weightDst);
}
//
}
| 2b4812221730e3ce6b4e75d68aca683d24376993.cu | #include "yeti_rank_pointwise.cuh"
#include "radix_sort_block.cuh"
#include <catboost/cuda/cuda_lib/kernel/kernel.cuh>
#include <catboost/cuda/cuda_lib/kernel/arch.cuh>
#include <catboost/cuda/cuda_util/kernel/kernel_helpers.cuh>
#include <catboost/cuda/cuda_util/kernel/fill.cuh>
#include <catboost/cuda/cuda_util/kernel/random_gen.cuh>
#include <contrib/libs/cub/cub/block/block_radix_sort.cuh>
namespace NKernel
{
__global__ void RemoveQueryMeansImpl(const int* qids, int size, const float* queryMeans,
float* approx)
{
const int tid = blockDim.x * blockIdx.x + threadIdx.x;
if (tid < size) {
approx[tid] -= queryMeans[qids[tid]];
}
}
void RemoveQueryMeans(const int* qids, int size, const float* queryMeans,
float* approx, TCudaStream stream) {
const int blockSize = 256;
const int numBlocks = (size + blockSize - 1) / blockSize;
if (numBlocks > 0) {
RemoveQueryMeansImpl<<< numBlocks, blockSize, 0, stream >>> (qids, size, queryMeans, approx);
}
}
template <ui32 BLOCK_SIZE>
__device__ void YetiRankGradientSingleGroup(ui32 seed,
ui32 bootstrapIter,
const float* __restrict__ approx, const float* __restrict__ relev,
const int* __restrict__ qids, int size,
float* approxes,
volatile float* __restrict__ targetDst,
volatile float* __restrict__ weightDst) {
const int N = 4;
ui32 srcIndex[N]; //contains offset and qid of point
i16 queryBegin[N];
__shared__ float relevs[BLOCK_SIZE * 4]; // 4K
{
{
int* queryIds = (int*) approxes;
const int firstQid = __ldg(qids);
for (int k = 0; k < N; k++) {
int offset = threadIdx.x + k * BLOCK_SIZE;
int qid = offset < size ? qids[offset] : qids[size - 1] + 1;
qid -= firstQid;
queryIds[offset] = qid;
srcIndex[k] = offset;
                    srcIndex[k] |= qid << 10; //low 10 bits: point offset within the group; local qid stored above bit 9
}
int* queryOffsets = (int*) relevs;
queryOffsets[threadIdx.x] = size;
__syncthreads();
for (int k = 0; k < N; k++) {
const int offset = threadIdx.x + k * BLOCK_SIZE; //point id
if (!offset || queryIds[offset] != queryIds[offset - 1])
{
const int qid = queryIds[offset];
queryOffsets[qid] = offset;
}
}
__syncthreads();
for (int k = 0; k < N; k++) {
const int offset = threadIdx.x + k * BLOCK_SIZE; //point id
int qid = queryIds[offset];
queryBegin[k] = queryOffsets[qid];
}
__syncthreads();
}
for (int k = 0; k < 4; k++) {
const int offset = threadIdx.x + k * BLOCK_SIZE;
relevs[offset] = offset < size ? relev[offset] : 1000.0f;
approxes[offset] = offset < size ? __expf(min(approx[offset], 70.0f)) : 1000.0f;
}
}
__syncthreads();
__shared__ ui32 indices[BLOCK_SIZE * N];
for (int t = 0; t < bootstrapIter; t++)
{
ui32 key[N];
ui32 idx[N] = {srcIndex[0], srcIndex[1], srcIndex[2], srcIndex[3]};
for (int k = 0; k < N; k++)
{
float val = (idx[k] & 1023) < size ? approxes[idx[k] & 1023] : -1000.0f;
const float uni = NextUniformFloat32(&seed);
val *= uni / (1.000001f - uni);
key[k] = __float_as_int(val);
key[k] ^= (key[k] & 0x80000000) ? 0xffffffff : 0x80000000;
}
{
RadixSortSingleBlock4<BLOCK_SIZE, false, 0, 32>((uint4&)key, (uint4&)idx, indices);
RadixSortSingleBlock4<BLOCK_SIZE, true, 10, 10>((uint4&)idx, indices);
}
            //after both sorts, idx[k] (not key[k]) packs the document that lands at position (threadIdx.x + k * BLOCK_SIZE - queryOffset) within query idx[k] >> 10; its original offset sits in the low 10 bits
for (int k = 0; k < N; k++) {
const int offset = threadIdx.x + k * BLOCK_SIZE;
indices[offset] = idx[k] & 1023;
}
__syncthreads();
for (int k = 0; k < N; k++) {
const int offset = threadIdx.x + k * BLOCK_SIZE;
const int idx1 = offset != queryBegin[k] ? (int)indices[offset - 1] : -1;
const int idx2 = (int)indices[offset];
const float relev1 = idx1 != -1 ? relevs[idx1] : 0;
const float relev2 = relevs[idx2];
const float approx1 = idx1 != -1 ? approxes[idx1] : 0;
const float approx2 = approxes[idx2];
const float decaySpeed = 0.99f;
const float magicConst = 10; //to make learning rate more comparable with pair classification
const float decay = magicConst * powf(decaySpeed, offset - queryBegin[k] - 1);
const float pairWeight = decay * fabs(relev1 - relev2) / bootstrapIter;
const float ll = pairWeight * (relev1 > relev2 ? approx2 : -approx1) / (approx2 + approx1); //
if (idx1 != -1 && offset < size) {
weightDst[idx1] += pairWeight;
targetDst[idx1] += ll;
}
__syncthreads();
if (idx1 != -1 && offset < size) {
weightDst[idx2] += pairWeight;
targetDst[idx2] += -ll;
}
__syncthreads();
}
__syncthreads();
}
};
template<int BLOCK_SIZE>
__global__ void YetiRankGradientImpl(int seed,
ui32 bootstrapIter,
const ui32* queryOffsets,
volatile int* qidCursor,
ui32 qOffsetsBias, ui32 qCount,
const int* qids,
const float* approx,
const float* relev,
ui32 size,
float* targetDst,
float* weightDst) {
__shared__ float approxes[BLOCK_SIZE * 4]; // 4K
while (true)
{
int taskQid = 0;
int* sharedQid = (int*) approxes;
int offset = 0;
int nextTaskOffset = 0;
if (threadIdx.x == 0) {
taskQid = qidCursor[0];
while (true)
{
if (taskQid >= qCount) {
break;
}
offset = queryOffsets[taskQid] - qOffsetsBias;
nextTaskOffset = min(offset + 4 * BLOCK_SIZE, size);
int nextTaskQid = nextTaskOffset < size ? qids[nextTaskOffset] : qCount;
int oldQid = atomicCAS(const_cast<int*>(qidCursor), taskQid, nextTaskQid);
if (oldQid == taskQid) {
break;
} else {
taskQid = oldQid;
}
}
}
if (threadIdx.x == 0) {
sharedQid[0] = taskQid;
sharedQid[1] = offset;
sharedQid[2] = nextTaskOffset;
}
__syncthreads();
taskQid = sharedQid[0];
offset = sharedQid[1];
nextTaskOffset = sharedQid[2];
__syncthreads();
if (taskQid >= qCount) {
return;
}
//statisticians will complain :) but we don't need high-quality random generators
ui32 taskSeed = 127 * taskQid + 16807 * threadIdx.x;
AdvanceSeed32(&taskSeed);
taskSeed += seed;
AdvanceSeed32(&taskSeed);
YetiRankGradientSingleGroup<BLOCK_SIZE>(taskSeed,
bootstrapIter,
approx + offset,
relev + offset,
qids + offset,
nextTaskOffset - offset,
approxes,
targetDst + offset,
weightDst + offset);
__syncthreads();
}
}
void YetiRankGradient(ui64 seed,
ui32 bootstrapIter,
const ui32* queryOffsets,
int* qidCursor,
ui32 qOffsetsBias,
ui32 qCount,
const int* qids,
const float* approx,
const float* relev,
ui32 size,
float* targetDst,
float* weightDst,
TCudaStream stream)
{
const ui32 maxBlocksPerSm = 4;
const ui32 smCount = TArchProps::SMCount();
const int blockSize = 256;
FillBuffer(targetDst, 0.0f, size, stream);
FillBuffer(weightDst, 0.0f, size, stream);
FillBuffer(qidCursor, 0, 1, stream);
int cudaSeed = seed + (seed >> 32);
YetiRankGradientImpl<blockSize><<<maxBlocksPerSm * smCount, blockSize, 0, stream>>>(cudaSeed,
bootstrapIter, queryOffsets,
qidCursor, qOffsetsBias, qCount, qids,
approx, relev, size, targetDst, weightDst);
}
//
}
|
1af4e4d511b0f325c5189b9256edeea2ee5d5fc6.hip | // !!! This is a file automatically generated by hipify!!!
#include <cudnn.h>
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define C 160
#define N 96
#define H 28
#define W 28
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
inline void chkerr(hipError_t code)
{
if (code != hipSuccess)
{
std::cerr << "ERROR!!!:" << hipGetErrorString(code) <<endl;
exit(-1);
}
}
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
hipMalloc(&kernel,sizeof(float)*C*N*9);
hipMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
hipMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N;//kernel
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
hipMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),hipMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
hipMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
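// Illustrative usage of the three cuDNN wrappers above (not part of the
// original file; the driving main() is assumed to live elsewhere):
//   ConvGemm conv; conv.initialize();
//   float* d_out = conv.forward(d_input);  // d_input: C*H*W floats on the device
// ConvWinogradeNon and ConvFFT expose the same initialize()/forward() interface.
// The kernel below appears to be an auto-generated (TVM-style) shared-memory
// tiled direct convolution for the same C/N/H/W/R/S problem size; its grid and
// block dimensions are expected to come from that host driver.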
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[4];
__shared__ float pad_temp_shared[2560];
__shared__ float kernel_shared[2560];
float pad_temp_shared_local[10];
float kernel_shared_local[10];
compute_local[(0)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
for (int rx_outer = 0; rx_outer < 3; ++rx_outer) {
__syncthreads();
pad_temp_shared[((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 1))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 2))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 3))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 4))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 1) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 5))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 1) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 6))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 1) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 7))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 1) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 8))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 2) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 9))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 2) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 10))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 2) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 11))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 2) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 12))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 3) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 13))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 3) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 14))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 3) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 15))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 3) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 16))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 755))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 17))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 756))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 18))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 757))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 19))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 758))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 20))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 5) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 21))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 5) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 22))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 5) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 23))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 5) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 24))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 6) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 25))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 6) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 26))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 6) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 27))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 6) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 28))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 7) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 29))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 7) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 30))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 7) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 31))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 7) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 32))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 1539))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 33))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 1540))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 34))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 1541))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 35))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 1542))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 36))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 9) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 37))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 9) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 38))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 9) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 39))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 9) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f);
kernel_shared[((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)))] = kernel[(((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 1))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 9))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 2))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 18))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 3))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 27))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 4))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 36))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 5))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 45))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 6))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 54))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 7))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 63))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 8))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 72))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 9))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 81))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 10))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 90))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 11))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 99))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 12))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 108))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 13))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 117))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 14))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 126))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 15))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 135))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 16))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 144))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 17))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 153))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 18))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 162))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 19))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 171))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 20))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 180))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 21))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 189))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 22))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 198))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 23))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 207))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 24))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 216))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 25))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 225))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 26))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 234))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 27))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 243))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 28))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 252))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 29))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 261))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 30))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 270))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 31))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 279))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 32))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 288))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 33))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 297))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 34))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 306))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 35))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 315))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 36))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 324))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 37))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 333))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 38))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 342))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 39))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 351))];
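// Barrier: all threads must finish staging the shared-memory tiles written above
// (pad_temp_shared and kernel_shared) before any thread reads them in the compute phase.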
__syncthreads();
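// Register-tiled multiply-accumulate phase (fully unrolled by the code generator).
// Each thread accumulates four partial sums in compute_local:
//   compute_local[0]/[1] pair pad_temp groups 0 and 1 with the weights at threadIdx.z*160 + k,
//   compute_local[2]/[3] pair the same two groups with the second weight block at offset +1280.
// Per unrolled step: ten inputs are read from pad_temp_shared (two groups of five, the groups
// 8 elements apart and values within a group 16 apart), ten weights are read from kernel_shared,
// and 20 multiply-accumulates are issued. Each successive step advances the pad_temp_shared
// base offset by 80 and the weight index k by 5.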
pad_temp_shared_local[(0)] = pad_temp_shared[(((((int)threadIdx.y) * 4) + ((int)threadIdx.x)))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 8))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 16))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 24))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 32))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 40))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 48))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 56))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 64))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 72))];
kernel_shared_local[(0)] = kernel_shared[((((int)threadIdx.z) * 160))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1280))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1281))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 2))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1282))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 3))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1283))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 4))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1284))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 80))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 88))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 96))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 104))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 112))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 120))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 128))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 136))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 144))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 152))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 5))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1285))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 6))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1286))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 7))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1287))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 8))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1288))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 9))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1289))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 160))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 168))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 176))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 184))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 192))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 200))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 208))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 216))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 224))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 232))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 10))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1290))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 11))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1291))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 12))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1292))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 13))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1293))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 14))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1294))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 240))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 248))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 256))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 264))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 272))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 280))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 288))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 296))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 304))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 312))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 15))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1295))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 16))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1296))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 17))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1297))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 18))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1298))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 19))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1299))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 320))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 328))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 336))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 344))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 352))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 360))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 368))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 376))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 384))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 392))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 20))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1300))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 21))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1301))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 22))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1302))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 23))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1303))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 24))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1304))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 400))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 408))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 416))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 424))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 432))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 440))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 448))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 456))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 464))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 472))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 25))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1305))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 26))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1306))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 27))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1307))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 28))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1308))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 29))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1309))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 480))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 488))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 496))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 504))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 512))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 520))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 528))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 536))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 544))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 552))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 30))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1310))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 31))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1311))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 32))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1312))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 33))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1313))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 34))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1314))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 560))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 568))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 576))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 584))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 592))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 600))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 608))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 616))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 624))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 632))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 35))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1315))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 36))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1316))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 37))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1317))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 38))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1318))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 39))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1319))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 640))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 648))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 656))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 664))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 672))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 680))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 688))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 696))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 704))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 712))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 40))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1320))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 41))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1321))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 42))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1322))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 43))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1323))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 44))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1324))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 720))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 728))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 736))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 744))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 752))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 760))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 768))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 776))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 784))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 792))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 45))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1325))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 46))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1326))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 47))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1327))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 48))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1328))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 49))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1329))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 800))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 808))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 816))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 824))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 832))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 840))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 848))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 856))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 864))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 872))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 50))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1330))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 51))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1331))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 52))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1332))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 53))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1333))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 54))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1334))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 880))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 888))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 896))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 904))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 912))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 920))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 928))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 936))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 944))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 952))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 55))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1335))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 56))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1336))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 57))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1337))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 58))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1338))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 59))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1339))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 960))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 968))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 976))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 984))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 992))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1000))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1008))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1016))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1024))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1032))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 60))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1340))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 61))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1341))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 62))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1342))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 63))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1343))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 64))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1344))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1040))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1048))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1056))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1064))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1072))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1080))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1088))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1096))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1104))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1112))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 65))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1345))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 66))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1346))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 67))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1347))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 68))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1348))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 69))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1349))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1120))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1128))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1136))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1144))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1152))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1160))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1168))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1176))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1184))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1192))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 70))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1350))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 71))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1351))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 72))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1352))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 73))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1353))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 74))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1354))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1200))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1208))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1216))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1224))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1232))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1240))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1248))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1256))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1264))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1272))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 75))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1355))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 76))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1356))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 77))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1357))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 78))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1358))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 79))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1359))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
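// Unrolled steps continue: input tile offsets +1280 and +1360, filter taps 80-89 (second slice 1360-1369).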
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1280))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1288))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1296))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1304))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1312))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1320))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1328))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1336))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1344))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1352))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 80))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1360))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 81))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1361))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 82))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1362))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 83))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1363))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 84))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1364))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1360))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1368))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1376))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1384))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1392))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1400))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1408))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1416))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1424))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1432))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 85))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1365))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 86))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1366))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 87))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1367))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 88))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1368))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 89))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1369))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
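// Unrolled steps continue: input tile offsets +1440 and +1520, filter taps 90-99 (second slice 1370-1379).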
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1440))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1448))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1456))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1464))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1472))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1480))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1488))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1496))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1504))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1512))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 90))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1370))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 91))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1371))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 92))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1372))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 93))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1373))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 94))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1374))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1520))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1528))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1536))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1544))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1552))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1560))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1568))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1576))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1584))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1592))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 95))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1375))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 96))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1376))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 97))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1377))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 98))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1378))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 99))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1379))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
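// Unrolled steps continue: input tile offsets +1600 and +1680, filter taps 100-109 (second slice 1380-1389).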
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1600))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1608))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1616))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1624))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1632))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1640))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1648))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1656))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1664))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1672))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 100))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1380))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 101))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1381))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 102))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1382))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 103))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1383))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 104))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1384))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1680))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1688))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1696))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1704))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1712))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1720))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1728))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1736))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1744))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1752))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 105))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1385))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 106))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1386))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 107))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1387))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 108))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1388))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 109))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1389))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
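// Unrolled steps continue: input tile offsets +1760 and +1840, filter taps 110-119 (second slice 1390-1399).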
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1760))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1768))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1776))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1784))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1792))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1800))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1808))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1816))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1824))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1832))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 110))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1390))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 111))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1391))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 112))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1392))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 113))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1393))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 114))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1394))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1840))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1848))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1856))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1864))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1872))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1880))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1888))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1896))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1904))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1912))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 115))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1395))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 116))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1396))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 117))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1397))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 118))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1398))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 119))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1399))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
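// Unrolled steps continue: input tile offsets +1920 and +2000, filter taps 120-129 (second slice 1400-1409).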
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1920))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1928))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1936))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1944))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1952))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1960))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1968))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1976))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1984))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1992))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 120))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1400))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 121))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1401))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 122))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1402))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 123))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1403))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 124))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1404))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2000))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2008))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2016))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2024))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2032))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2040))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2048))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2056))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2064))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2072))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 125))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1405))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 126))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1406))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 127))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1407))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 128))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1408))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 129))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1409))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
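// Unrolled steps continue: input tile offsets +2080 and +2160, filter taps 130-139 (second slice 1410-1419).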
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2080))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2088))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2096))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2104))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2112))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2120))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2128))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2136))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2144))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2152))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 130))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1410))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 131))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1411))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 132))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1412))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 133))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1413))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 134))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1414))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2160))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2168))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2176))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2184))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2192))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2200))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2208))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2216))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2224))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2232))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 135))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1415))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 136))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1416))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 137))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1417))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 138))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1418))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 139))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1419))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
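// Unrolled steps continue: input tile offsets +2240 and +2320, filter taps 140-149 (second slice 1420-1429).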
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2240))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2248))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2256))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2264))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2272))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2280))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2288))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2296))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2304))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2312))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 140))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1420))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 141))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1421))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 142))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1422))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 143))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1423))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 144))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1424))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2320))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2328))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2336))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2344))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2352))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2360))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2368))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2376))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2384))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2392))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 145))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1425))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 146))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1426))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 147))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1427))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 148))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1428))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 149))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1429))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2400))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2408))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2416))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2424))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2432))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2440))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2448))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2456))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2464))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2472))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 150))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1430))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 151))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1431))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 152))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1432))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 153))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1433))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 154))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1434))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2480))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2488))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2496))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2504))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2512))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2520))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2528))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2536))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2544))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2552))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 155))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1435))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 156))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1436))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 157))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1437))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 158))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1438))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 159))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1439))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
}
}
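// write this thread's four accumulated results back to the output tensor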
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + ((int)threadIdx.x)))] = compute_local[(0)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + ((int)threadIdx.x)) + 6272))] = compute_local[(2)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + ((int)threadIdx.x)) + 56))] = compute_local[(1)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + ((int)threadIdx.x)) + 6328))] = compute_local[(3)];
}
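// check_diff: L1 difference between two host buffers (used to compare the generated kernel against the cuDNN reference)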
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
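// pad_input: zero-pad each HxW channel by one pixel on every side into an (H+2)x(W+2) buffer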
void pad_input(float * x, float *y){
#pragma omp parallel for
for(unsigned int i=0;i<(H + 2)*(W+2)*C;++i){
y[i] = 0.0f;
}
#pragma omp parallel for
for(unsigned int c=0;c<C;++c){
for(unsigned int h=0;h<H;++h){
for(unsigned int w=0;w<W;++w){
unsigned int h_padded = h + 1;
unsigned int w_padded = w + 1;
y[c*(H+2)*(W+2) + h_padded*(W+2) + w_padded] = x[c*(H)*(W) + h*(W) + w];
}
}
}
}
int main(void){
float *input = new float[C*H*W];
time_t t;
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float * padded_input = new float[C*(H+2)*(W+2)];
pad_input(input, padded_input);
float *device_input;
hipMalloc(&device_input,C*H*W*sizeof(float));
hipMemcpy(device_input,input,C*H*W*sizeof(float),hipMemcpyHostToDevice);
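// all-ones 3x3 filter bank so every backend convolves identical weights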
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
hipEvent_t event_start;
hipEvent_t event_stop;
hipEventCreate(&event_start);
hipEventCreate(&event_stop);
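// warm-up passes: run each cuDNN backend once and keep the implicit-GEMM result on the host as the correctness reference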
out_cudnn = convGemm.forward(device_input);
hipMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
hipMalloc(&device_out,H*W*N*sizeof(float));
hipMemset(device_out,0,H*W*N*sizeof(float));
hipMalloc(&device_K,C*N*9*sizeof(float));
hipMemcpy(device_K,K,C*N*9*sizeof(float),hipMemcpyHostToDevice);
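// time each cuDNN backend (implicit GEMM, non-fused Winograd, FFT) with CUDA events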
hipEventRecord(event_start);
convGemm.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnGemmTime;
hipEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
hipEventRecord(event_start);
convWinogradeNon.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
hipEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
hipEventRecord(event_start);
convFFT.forward(device_input);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float cudnnFFTTime;
hipEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
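// launch configuration for the generated kernel; note: paddedInputDevice is allocated and copied below but this launch reads device_input directly, since the kernel handles the 1-pixel border with in-kernel bounds checks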
dim3 grid(7,7,6);
dim3 block(4,2,8);
float * paddedInputDevice;
chkerr(hipMalloc(&paddedInputDevice, C * (H + 2) * (W + 2) * sizeof(float)));
chkerr(hipMemcpy(paddedInputDevice, padded_input, C * (H + 2) * (W + 2) * sizeof(float), hipMemcpyHostToDevice));
hipEventRecord(event_start);
hipLaunchKernelGGL(( default_function_kernel0), dim3(grid), dim3(block), 0, 0, device_input, device_K, device_out);
hipEventRecord(event_stop);
hipEventSynchronize(event_stop);
float time_tdc;
hipEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
hipMemcpy(out_tdc,device_out,N*H*W*sizeof(float),hipMemcpyDeviceToHost);
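// compute the L1 difference against the cuDNN reference (the value is not printed), then report sizes, per-backend times, and the generated kernel's speedups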
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<endl;
return 0;
}
| 1af4e4d511b0f325c5189b9256edeea2ee5d5fc6.cu |
#include <cudnn.h>
#include <stdio.h>
#include <cuda.h>
#include <malloc.h>
#include <cstdlib>
#include <time.h>
#include <iostream>
#include <sys/types.h>
#include <errno.h>
#include <vector>
#include <fstream>
#include <string>
#include <omp.h>
#define C 160
#define N 96
#define H 28
#define W 28
#define R 3
#define S 3
using namespace std;
#define checkCUDNN(expression) \
{ \
cudnnStatus_t status = (expression); \
if (status != CUDNN_STATUS_SUCCESS) { \
std::cerr << "Error on line " << __LINE__ << ": " \
<< cudnnGetErrorString(status) << std::endl; \
std::exit(EXIT_FAILURE); \
} \
}
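// chkerr: abort with a readable message on any CUDA runtime error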
inline void chkerr(cudaError_t code)
{
if (code != cudaSuccess)
{
std::cerr << "ERROR!!!:" << cudaGetErrorString(code) <<endl;
exit(-1);
}
}
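// ConvGemm: thin cuDNN wrapper that runs the 3x3 convolution with the implicit-GEMM algorithm
// (descriptors, workspace, and an all-ones filter bank are set up in initialize())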
class ConvGemm{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvGemm::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter weights (R*S taps per channel, C input channels, N filters)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvGemm::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_IMPLICIT_GEMM,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
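// ConvWinogradeNon: same wrapper, but using cuDNN's non-fused Winograd algorithm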
class ConvWinogradeNon{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvWinogradeNon::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter weights (R*S taps per channel, C input channels, N filters)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvWinogradeNon::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_WINOGRAD_NONFUSED,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
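// ConvFFT: same wrapper, but using cuDNN's FFT-based convolution algorithm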
class ConvFFT{
public:
float *cpuKernel;
float alpha = 1.0f;
float beta = 0.0f;
cudnnHandle_t convCudnn;
void* d_workspace{nullptr};
size_t workspace_bytes{0};
cudnnTensorDescriptor_t convInputDescriptor;
cudnnTensorDescriptor_t convOutputDescriptor;
cudnnFilterDescriptor_t convKernelDescriptor;
cudnnConvolutionDescriptor_t convDesc;
float *output;
float *kernel;
void initialize();
float *forward(float *input);
};
void ConvFFT::initialize(){
cudaMalloc(&kernel,sizeof(float)*C*N*9);
cudaMalloc(&this->output,sizeof(float)*N*H*W);
cudnnCreate(&convCudnn);
cudnnCreateTensorDescriptor(&convInputDescriptor);
cudnnSetTensor4dDescriptor(convInputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/C,
/*image_height=*/H,
/*image_width=*/W);
cudnnCreateFilterDescriptor(&convKernelDescriptor);
cudnnSetFilter4dDescriptor(convKernelDescriptor,
/*dataType=*/CUDNN_DATA_FLOAT,
/*format=*/CUDNN_TENSOR_NCHW,
/*out_channels=*/N,
/*in_channels=*/C,
/*kernel_height=*/R,
/*kernel_width=*/S);
cudnnCreateConvolutionDescriptor(&convDesc);
cudnnSetConvolution2dDescriptor(convDesc,
/*pad_height=*/1,
/*pad_width=*/1,
/*vertical_stride=*/1,
/*horizontal_stride=*/1,
/*dilation_height=*/1,
/*dilation_width=*/1,
/*mode=*/CUDNN_CROSS_CORRELATION,
CUDNN_DATA_FLOAT);
int batch_size{0}, channels{0}, height{0}, width{0};
cudnnGetConvolution2dForwardOutputDim(convDesc,
convInputDescriptor,
convKernelDescriptor,
&batch_size,
&channels,
&height,
&width);
cudnnCreateTensorDescriptor(&convOutputDescriptor);
cudnnSetTensor4dDescriptor(convOutputDescriptor,
/*format=*/CUDNN_TENSOR_NCHW,
/*dataType=*/CUDNN_DATA_FLOAT,
/*batch_size=*/1,
/*channels=*/N,
/*image_height=*/H,
/*image_width=*/W);
cudnnGetConvolutionForwardWorkspaceSize(convCudnn,
convInputDescriptor,
convKernelDescriptor,
convDesc,
convOutputDescriptor,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
&workspace_bytes);
cudaMalloc(&d_workspace, workspace_bytes);
unsigned int kernelSize = R*S*C*N; // total number of filter weights (R*S taps per channel, C input channels, N filters)
this->cpuKernel = (float *)malloc(kernelSize*sizeof(float));
for(int i=0;i<kernelSize;++i){
this->cpuKernel[i] = 1.0f;
}
cudaMemcpy(kernel,cpuKernel,R*S*C*N*sizeof(float),cudaMemcpyHostToDevice);
free(cpuKernel);
}
float * ConvFFT::forward(float *input) {
cudaMemset(output, 0, 1*N*H*W*sizeof(float));
checkCUDNN(cudnnConvolutionForward(convCudnn,
&alpha,
convInputDescriptor,
input,
convKernelDescriptor,
kernel,
convDesc,
CUDNN_CONVOLUTION_FWD_ALGO_FFT,
d_workspace,
workspace_bytes,
&beta,
convOutputDescriptor,
output));
return output;
}
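// Auto-generated (TVM-style) direct 3x3 convolution kernel: for each filter tap (ry_outer, rx_outer)
// the block stages an input tile and the corresponding filter weights in shared memory, then every
// thread accumulates four output elements.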
extern "C" __global__ void default_function_kernel0(float* __restrict__ data, float* __restrict__ kernel, float* __restrict__ compute) {
float compute_local[4];
__shared__ float pad_temp_shared[2560];
__shared__ float kernel_shared[2560];
float pad_temp_shared_local[10];
float kernel_shared_local[10];
compute_local[(0)] = 0.000000e+00f;
compute_local[(2)] = 0.000000e+00f;
compute_local[(1)] = 0.000000e+00f;
compute_local[(3)] = 0.000000e+00f;
for (int ry_outer = 0; ry_outer < 3; ++ry_outer) {
for (int rx_outer = 0; rx_outer < 3; ++rx_outer) {
__syncthreads();
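// cooperatively stage the input tile for this (ry_outer, rx_outer) tap into shared memory, writing zeros at image borders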
pad_temp_shared[((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 1))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 2))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 3))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 4))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 1) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 5))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 1) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 6))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 1) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 7))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 1) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 8))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 2) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 9))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 2) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 10))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 2) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 11))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 2) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 12))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 3) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 13))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 3) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 14))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 3) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 15))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 3) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 16))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 755))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 17))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 756))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 18))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 757))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 19))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 758))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 20))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 5) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 21))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 5) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 22))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 5) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 23))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 5) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 24))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 6) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 25))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 6) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 26))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 6) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 27))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 2) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 6) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 2) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 28))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 7) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 29))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 7) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 30))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 7) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 31))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 3) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 7) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 3) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 32))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 1539))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 33))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 1540))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 34))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 1541))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 35))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + ((((int)threadIdx.x) * 10) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + (((((int)threadIdx.x) * 10) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + (((((int)threadIdx.x) * 10) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) + 1542))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 36))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (1 <= ((((int)blockIdx.x) * 4) + rx_outer))) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 9) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 29))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 37))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 9) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 28))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 38))] = (((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 9) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 27))] : 0.000000e+00f);
pad_temp_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 39))] = ((((1 <= (((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3))) && ((((((int)blockIdx.y) * 4) + ry_outer) + (((((int)threadIdx.x) * 10) + 1) & 3)) < 29)) && (((((int)blockIdx.x) * 4) + rx_outer) < 26)) ? data[((((((((((((int)threadIdx.z) * 15680) + (((int)threadIdx.y) * 7840)) + ((((((int)threadIdx.x) * 10) + 9) >> 2) * 784)) + (((int)blockIdx.y) * 112)) + (ry_outer * 28)) + ((((((int)threadIdx.x) * 10) + 1) & 3) * 28)) + (((int)blockIdx.x) * 4)) + rx_outer) - 26))] : 0.000000e+00f);
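// stage the filter weights for this (ry_outer, rx_outer) tap into shared memory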
kernel_shared[((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)))] = kernel[(((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 1))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 9))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 2))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 18))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 3))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 27))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 4))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 36))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 5))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 45))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 6))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 54))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 7))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 63))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 8))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 72))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 9))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 81))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 10))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 90))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 11))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 99))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 12))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 108))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 13))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 117))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 14))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 126))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 15))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 135))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 16))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 144))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 17))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 153))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 18))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 162))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 19))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 171))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 20))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 180))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 21))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 189))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 22))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 198))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 23))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 207))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 24))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 216))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 25))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 225))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 26))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 234))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 27))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 243))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 28))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 252))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 29))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 261))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 30))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 270))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 31))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 279))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 32))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 288))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 33))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 297))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 34))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 306))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 35))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 315))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 36))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 324))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 37))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 333))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 38))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 342))];
kernel_shared[(((((((int)threadIdx.z) * 320) + (((int)threadIdx.y) * 160)) + (((int)threadIdx.x) * 40)) + 39))] = kernel[((((((((((int)blockIdx.z) * 23040) + (((int)threadIdx.z) * 2880)) + (((int)threadIdx.y) * 1440)) + (((int)threadIdx.x) * 360)) + (ry_outer * 3)) + rx_outer) + 351))];
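  // Shared-memory staging for this (ry_outer, rx_outer) tap is complete; the barrier below
  // makes the freshly loaded pad_temp_shared / kernel_shared tiles visible to all threads
  // before the compute phase starts.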
__syncthreads();
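  // Register-blocked, fully unrolled compute phase. Each step below copies ten input values and
  // ten weights from shared memory into registers, then accumulates four partial sums in
  // compute_local: the two pad fragments (indices 0-4 vs 5-9, offset +8) appear to be two output
  // positions handled by this thread, and the two kernel fragments (indices 0-4 vs 5-9, offset
  // +1280) appear to be two output-feature blocks.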
pad_temp_shared_local[(0)] = pad_temp_shared[(((((int)threadIdx.y) * 4) + ((int)threadIdx.x)))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 8))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 16))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 24))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 32))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 40))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 48))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 56))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 64))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 72))];
kernel_shared_local[(0)] = kernel_shared[((((int)threadIdx.z) * 160))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1280))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1281))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 2))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1282))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 3))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1283))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 4))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1284))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
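  // The same load-then-multiply-accumulate step repeats below for each unrolled iteration of the
  // reduction axis, advancing the pad_temp_shared offset by 80 and the kernel_shared offset by 5
  // per step.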
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 80))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 88))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 96))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 104))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 112))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 120))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 128))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 136))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 144))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 152))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 5))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1285))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 6))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1286))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 7))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1287))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 8))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1288))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 9))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1289))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 160))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 168))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 176))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 184))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 192))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 200))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 208))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 216))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 224))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 232))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 10))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1290))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 11))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1291))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 12))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1292))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 13))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1293))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 14))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1294))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 240))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 248))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 256))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 264))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 272))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 280))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 288))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 296))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 304))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 312))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 15))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1295))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 16))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1296))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 17))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1297))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 18))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1298))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 19))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1299))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 320))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 328))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 336))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 344))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 352))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 360))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 368))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 376))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 384))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 392))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 20))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1300))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 21))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1301))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 22))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1302))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 23))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1303))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 24))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1304))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 400))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 408))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 416))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 424))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 432))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 440))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 448))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 456))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 464))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 472))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 25))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1305))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 26))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1306))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 27))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1307))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 28))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1308))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 29))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1309))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 480))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 488))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 496))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 504))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 512))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 520))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 528))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 536))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 544))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 552))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 30))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1310))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 31))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1311))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 32))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1312))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 33))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1313))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 34))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1314))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 560))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 568))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 576))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 584))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 592))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 600))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 608))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 616))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 624))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 632))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 35))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1315))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 36))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1316))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 37))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1317))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 38))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1318))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 39))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1319))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 640))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 648))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 656))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 664))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 672))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 680))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 688))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 696))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 704))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 712))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 40))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1320))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 41))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1321))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 42))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1322))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 43))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1323))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 44))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1324))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 720))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 728))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 736))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 744))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 752))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 760))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 768))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 776))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 784))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 792))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 45))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1325))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 46))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1326))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 47))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1327))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 48))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1328))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 49))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1329))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 800))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 808))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 816))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 824))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 832))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 840))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 848))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 856))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 864))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 872))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 50))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1330))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 51))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1331))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 52))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1332))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 53))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1333))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 54))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1334))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 880))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 888))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 896))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 904))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 912))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 920))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 928))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 936))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 944))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 952))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 55))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1335))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 56))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1336))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 57))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1337))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 58))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1338))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 59))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1339))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 960))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 968))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 976))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 984))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 992))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1000))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1008))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1016))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1024))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1032))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 60))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1340))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 61))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1341))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 62))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1342))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 63))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1343))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 64))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1344))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1040))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1048))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1056))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1064))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1072))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1080))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1088))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1096))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1104))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1112))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 65))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1345))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 66))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1346))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 67))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1347))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 68))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1348))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 69))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1349))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1120))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1128))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1136))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1144))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1152))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1160))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1168))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1176))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1184))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1192))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 70))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1350))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 71))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1351))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 72))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1352))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 73))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1353))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 74))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1354))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1200))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1208))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1216))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1224))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1232))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1240))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1248))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1256))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1264))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1272))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 75))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1355))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 76))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1356))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 77))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1357))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 78))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1358))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 79))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1359))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1280))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1288))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1296))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1304))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1312))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1320))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1328))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1336))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1344))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1352))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 80))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1360))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 81))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1361))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 82))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1362))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 83))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1363))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 84))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1364))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1360))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1368))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1376))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1384))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1392))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1400))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1408))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1416))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1424))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1432))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 85))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1365))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 86))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1366))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 87))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1367))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 88))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1368))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 89))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1369))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1440))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1448))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1456))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1464))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1472))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1480))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1488))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1496))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1504))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1512))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 90))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1370))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 91))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1371))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 92))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1372))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 93))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1373))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 94))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1374))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1520))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1528))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1536))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1544))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1552))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1560))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1568))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1576))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1584))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1592))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 95))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1375))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 96))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1376))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 97))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1377))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 98))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1378))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 99))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1379))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
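// In each step the five weights per accumulator column are consecutive in
// kernel_shared (e.g., offsets 100..104 just below), which suggests one row
// of a 5-wide filter; the second weight set sits 1280 elements (8 * 160)
// higher, i.e., the same thread apparently accumulates a second output
// channel group as well. This is an interpretation of the observed offsets,
// not something stated elsewhere in the source.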
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1600))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1608))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1616))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1624))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1632))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1640))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1648))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1656))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1664))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1672))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 100))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1380))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 101))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1381))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 102))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1382))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 103))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1383))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 104))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1384))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1680))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1688))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1696))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1704))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1712))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1720))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1728))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1736))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1744))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1752))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 105))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1385))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 106))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1386))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 107))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1387))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 108))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1388))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 109))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1389))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1760))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1768))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1776))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1784))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1792))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1800))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1808))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1816))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1824))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1832))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 110))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1390))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 111))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1391))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 112))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1392))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 113))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1393))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 114))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1394))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1840))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1848))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1856))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1864))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1872))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1880))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1888))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1896))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1904))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1912))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 115))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1395))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 116))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1396))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 117))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1397))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 118))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1398))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 119))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1399))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1920))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1928))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1936))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1944))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1952))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1960))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1968))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1976))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1984))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 1992))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 120))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1400))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 121))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1401))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 122))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1402))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 123))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1403))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 124))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1404))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2000))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2008))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2016))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2024))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2032))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2040))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2048))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2056))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2064))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2072))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 125))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1405))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 126))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1406))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 127))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1407))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 128))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1408))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 129))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1409))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
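// For the input side, the two accumulator columns read pad_temp_shared at
// offsets 8 apart (e.g., 2080 vs 2088 below), and successive taps within a
// column step by 16 elements (2080, 2096, 2112, ...), as can be read
// directly off the staged offsets.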
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2080))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2088))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2096))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2104))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2112))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2120))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2128))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2136))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2144))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2152))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 130))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1410))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 131))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1411))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 132))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1412))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 133))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1413))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 134))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1414))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2160))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2168))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2176))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2184))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2192))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2200))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2208))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2216))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2224))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2232))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 135))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1415))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 136))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1416))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 137))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1417))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 138))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1418))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 139))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1419))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2240))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2248))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2256))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2264))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2272))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2280))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2288))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2296))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2304))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2312))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 140))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1420))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 141))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1421))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 142))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1422))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 143))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1423))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 144))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1424))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2320))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2328))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2336))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2344))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2352))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2360))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2368))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2376))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2384))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2392))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 145))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1425))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 146))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1426))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 147))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1427))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 148))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1428))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 149))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1429))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2400))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2408))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2416))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2424))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2432))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2440))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2448))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2456))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2464))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2472))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 150))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1430))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 151))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1431))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 152))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1432))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 153))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1433))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 154))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1434))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
pad_temp_shared_local[(0)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2480))];
pad_temp_shared_local[(5)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2488))];
pad_temp_shared_local[(1)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2496))];
pad_temp_shared_local[(6)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2504))];
pad_temp_shared_local[(2)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2512))];
pad_temp_shared_local[(7)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2520))];
pad_temp_shared_local[(3)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2528))];
pad_temp_shared_local[(8)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2536))];
pad_temp_shared_local[(4)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2544))];
pad_temp_shared_local[(9)] = pad_temp_shared[((((((int)threadIdx.y) * 4) + ((int)threadIdx.x)) + 2552))];
kernel_shared_local[(0)] = kernel_shared[(((((int)threadIdx.z) * 160) + 155))];
kernel_shared_local[(5)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1435))];
kernel_shared_local[(1)] = kernel_shared[(((((int)threadIdx.z) * 160) + 156))];
kernel_shared_local[(6)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1436))];
kernel_shared_local[(2)] = kernel_shared[(((((int)threadIdx.z) * 160) + 157))];
kernel_shared_local[(7)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1437))];
kernel_shared_local[(3)] = kernel_shared[(((((int)threadIdx.z) * 160) + 158))];
kernel_shared_local[(8)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1438))];
kernel_shared_local[(4)] = kernel_shared[(((((int)threadIdx.z) * 160) + 159))];
kernel_shared_local[(9)] = kernel_shared[(((((int)threadIdx.z) * 160) + 1439))];
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(0)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(0)] * kernel_shared_local[(5)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(0)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(5)] * kernel_shared_local[(5)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(1)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(1)] * kernel_shared_local[(6)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(1)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(6)] * kernel_shared_local[(6)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(2)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(2)] * kernel_shared_local[(7)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(2)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(7)] * kernel_shared_local[(7)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(3)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(3)] * kernel_shared_local[(8)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(3)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(8)] * kernel_shared_local[(8)]));
compute_local[(0)] = (compute_local[(0)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(4)]));
compute_local[(2)] = (compute_local[(2)] + (pad_temp_shared_local[(4)] * kernel_shared_local[(9)]));
compute_local[(1)] = (compute_local[(1)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(4)]));
compute_local[(3)] = (compute_local[(3)] + (pad_temp_shared_local[(9)] * kernel_shared_local[(9)]));
}
}
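// Write the four results accumulated in registers back to global memory:
// compute_local[0]/[1] are two output rows (56 = 2*28 elements apart) and
// compute_local[2]/[3] are the same two rows for an output channel 8 feature
// maps further on (6272 = 8*28*28 elements), assuming the 28x28 output
// layout implied by the indexing below.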
compute[(((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + ((int)threadIdx.x)))] = compute_local[(0)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + ((int)threadIdx.x)) + 6272))] = compute_local[(2)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + ((int)threadIdx.x)) + 56))] = compute_local[(1)];
compute[((((((((((int)blockIdx.z) * 12544) + (((int)threadIdx.z) * 784)) + (((int)blockIdx.y) * 112)) + (((int)threadIdx.y) * 28)) + (((int)blockIdx.x) * 4)) + ((int)threadIdx.x)) + 6328))] = compute_local[(3)];
}
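// Host-side reference check: sums the absolute element-wise difference
// between two buffers (used below to compare the generated kernel's output
// against the reference convolution result).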
float check_diff(float *x, float *y, unsigned int size){
float diff = 0.0f;
#pragma omp parallel for reduction(+ : diff)
for(unsigned int i=0;i<size;++i){
diff += abs(x[i] - y[i]);
}
return diff;
}
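// Zero-pads each of the C HxW channels by one element on every spatial
// border, producing a C x (H+2) x (W+2) buffer.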
void pad_input(float * x, float *y){
#pragma omp parallel for
for(unsigned int i=0;i<(H + 2)*(W+2)*C;++i){
y[i] = 0.0f;
}
#pragma omp parallel for
for(unsigned int c=0;c<C;++c){
for(unsigned int h=0;h<H;++h){
for(unsigned int w=0;w<W;++w){
unsigned int h_padded = h + 1;
unsigned int w_padded = w + 1;
y[c*(H+2)*(W+2) + h_padded*(W+2) + w_padded] = x[c*(H)*(W) + h*(W) + w];
}
}
}
}
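// Benchmark driver (flow inferred from the calls below): generate a random
// C*H*W input, run the ConvGemm / ConvWinogradeNon / ConvFFT reference paths
// once for warm-up and a reference output, time each of them with CUDA
// events, then time the generated kernel (default_function_kernel0) and
// print one CSV line with the four timings and the reference/kernel speedup
// ratios. The L1 difference against the reference output is computed but not
// printed.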
int main(void){
float *input = new float[C*H*W];
time_t t;
srand((unsigned) time(&t));
for(int i =0;i<C*H*W;++i){
input[i] = rand() % 10;
}
float * padded_input = new float[C*(H+2)*(W+2)];
pad_input(input, padded_input);
float *device_input;
cudaMalloc(&device_input,C*H*W*sizeof(float));
cudaMemcpy(device_input,input,C*H*W*sizeof(float),cudaMemcpyHostToDevice);
float *K = new float[C*N*9];
for(int i=0;i<C*N*9;++i){
K[i] = 1.0f;
}
ConvGemm convGemm;
convGemm.initialize();
ConvWinogradeNon convWinogradeNon;
convWinogradeNon.initialize();
ConvFFT convFFT;
convFFT.initialize();
float *out_cudnn;
float *out_cudnn_host = new float[N*H*W];
cudaEvent_t event_start;
cudaEvent_t event_stop;
cudaEventCreate(&event_start);
cudaEventCreate(&event_stop);
out_cudnn = convGemm.forward(device_input);
cudaMemcpy(out_cudnn_host,out_cudnn,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
out_cudnn = convFFT.forward(device_input);
out_cudnn = convWinogradeNon.forward(device_input);
float *device_K;
float *device_out;
cudaMalloc(&device_out,H*W*N*sizeof(float));
cudaMemset(device_out,0,H*W*N*sizeof(float));
cudaMalloc(&device_K,C*N*9*sizeof(float));
cudaMemcpy(device_K,K,C*N*9*sizeof(float),cudaMemcpyHostToDevice);
cudaEventRecord(event_start);
convGemm.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnGemmTime;
cudaEventElapsedTime(&cudnnGemmTime, event_start, event_stop);
cudaEventRecord(event_start);
convWinogradeNon.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnWinogradeTimeNon;
cudaEventElapsedTime(&cudnnWinogradeTimeNon, event_start, event_stop);
cudaEventRecord(event_start);
convFFT.forward(device_input);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float cudnnFFTTime;
cudaEventElapsedTime(&cudnnFFTTime, event_start, event_stop);
dim3 grid(7,7,6);
dim3 block(4,2,8);
float * paddedInputDevice;
chkerr(cudaMalloc(&paddedInputDevice, C * (H + 2) * (W + 2) * sizeof(float)));
chkerr(cudaMemcpy(paddedInputDevice, padded_input, C * (H + 2) * (W + 2) * sizeof(float), cudaMemcpyHostToDevice));
cudaEventRecord(event_start);
default_function_kernel0<<<grid, block>>>(device_input, device_K, device_out);
cudaEventRecord(event_stop);
cudaEventSynchronize(event_stop);
float time_tdc;
cudaEventElapsedTime(&time_tdc, event_start, event_stop);
float *out_tdc = new float[N*H*W];
cudaMemcpy(out_tdc,device_out,N*H*W*sizeof(float),cudaMemcpyDeviceToHost);
float difference = check_diff(out_cudnn_host, out_tdc, N*H*W);
cout<<N<<","<<C<<","<<H<<","<<W<<","<<cudnnFFTTime<<","<<cudnnWinogradeTimeNon<<","<<cudnnGemmTime<<","<<time_tdc<<","<<cudnnFFTTime/time_tdc<<","<<cudnnWinogradeTimeNon/time_tdc<<","<<cudnnGemmTime/time_tdc<<endl;
return 0;
}
|
015fb4fb69db385a682202dd7019944ec4ca98ef.hip | // !!! This is a file automatically generated by hipify!!!
/**
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
* Mobvoi Inc. (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include <limits>
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
#include "k2/csrc/array_ops.h"
#include "k2/csrc/fsa_algo.h"
#include "k2/csrc/fsa_utils.h"
#include "k2/csrc/host/aux_labels.h"
#include "k2/csrc/host/connect.h"
#include "k2/csrc/host/determinize.h"
#include "k2/csrc/host/intersect.h"
#include "k2/csrc/host/rmepsilon.h"
#include "k2/csrc/host/topsort.h"
#include "k2/csrc/host_shim.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/rm_epsilon.h"
// This file contains a subset of the algorithms in fsa_algo.h; currently it
// just contains the ones that are wrappings of the corresponding algorithms
// in host/.
namespace k2 {
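// Applies a single-FSA host algorithm `f` to every FSA of an FsaVec `src`,
// stacks the per-FSA results into `dest`, and concatenates the per-FSA arc
// maps (shifting each by the running arc count so the entries index arcs of
// the original FsaVec).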
bool RecursionWrapper(bool (*f)(Fsa &, Fsa *, Array1<int32_t> *), Fsa &src,
Fsa *dest, Array1<int32_t> *arc_map) {
NVTX_RANGE(K2_FUNC);
// src is actually an FsaVec. Just recurse for now.
int32_t num_fsas = src.shape.Dim0();
std::vector<Fsa> srcs(num_fsas), dests(num_fsas);
std::vector<Array1<int32_t>> arc_maps(num_fsas);
int32_t tot_num_arcs = 0;
for (int32_t i = 0; i < num_fsas; ++i) {
srcs[i] = src.Index(0, i);
// Recurse.
if (!f(srcs[i], &(dests[i]),
(arc_map != nullptr ? &(arc_maps[i]) : nullptr)))
return false;
if (arc_map != nullptr) {
// convert arc indexes in arc_maps from idx2 to idx012
arc_maps[i] = Plus(arc_maps[i], tot_num_arcs);
tot_num_arcs += srcs[i].NumElements();
}
}
*dest = Stack(0, num_fsas, dests.data());
if (arc_map != nullptr)
*arc_map = Cat(src.Context(), num_fsas, arc_maps.data());
return true;
}
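// Connect() here is the host (CPU) wrapper: for a 3-axis input it recurses
// via RecursionWrapper; otherwise it follows the usual two-pass pattern of
// the host implementation (GetSizes() to size the output, then GetOutput()
// to fill it), keeping only states that lie on a path from the start state
// to the final state.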
bool Connect(Fsa &src, Fsa *dest, Array1<int32_t> *arc_map /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
if (num_axes < 2 || num_axes > 3) {
K2_LOG(FATAL) << "Input has bad num-axes " << num_axes;
} else if (num_axes == 3) {
return RecursionWrapper(Connect, src, dest, arc_map);
}
k2host::Fsa host_fsa = FsaToHostFsa(src);
k2host::Connection c(host_fsa);
k2host::Array2Size<int32_t> size;
c.GetSizes(&size);
FsaCreator creator(size);
k2host::Fsa host_dest_fsa = creator.GetHostFsa();
int32_t *arc_map_data = nullptr;
if (arc_map != nullptr) {
*arc_map = Array1<int32_t>(src.Context(), size.size2);
arc_map_data = arc_map->Data();
}
bool ans = c.GetOutput(&host_dest_fsa, arc_map_data);
*dest = creator.GetFsa();
return ans;
}
bool TopSortHost(Fsa &src, Fsa *dest, Array1<int32_t> *arc_map /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
if (num_axes < 2 || num_axes > 3) {
K2_LOG(FATAL) << "Input has bad num-axes " << num_axes;
} else if (num_axes == 3) {
return RecursionWrapper(TopSortHost, src, dest, arc_map);
}
k2host::Fsa host_fsa = FsaToHostFsa(src);
k2host::TopSorter sorter(host_fsa);
k2host::Array2Size<int32_t> size;
sorter.GetSizes(&size);
FsaCreator creator(size);
k2host::Fsa host_dest_fsa = creator.GetHostFsa();
int32_t *arc_map_data = nullptr;
if (arc_map != nullptr) {
*arc_map = Array1<int32_t>(src.Context(), size.size2);
arc_map_data = arc_map->Data();
}
bool ans = sorter.GetOutput(&host_dest_fsa, arc_map_data);
*dest = creator.GetFsa();
return ans;
}
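// Intersect (host version). Both inputs must be arc-sorted. If one input
// holds a single FSA and the other holds N, the single FSA is broadcast
// against all N (that is what stride_a / stride_b == 0 implements below);
// otherwise the two inputs must contain the same number of FSAs.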
bool Intersect(FsaOrVec &a_fsas, int32_t properties_a, FsaOrVec &b_fsas,
int32_t properties_b, bool treat_epsilons_specially, FsaVec *out,
Array1<int32_t> *arc_map_a, Array1<int32_t> *arc_map_b) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(a_fsas.NumAxes() >= 2 && a_fsas.NumAxes() <= 3);
K2_CHECK(b_fsas.NumAxes() >= 2 && b_fsas.NumAxes() <= 3);
ContextPtr c = a_fsas.Context();
K2_CHECK_EQ(c->GetDeviceType(), kCpu);
if (a_fsas.NumAxes() == 2) {
FsaVec a_fsas_vec = FsaToFsaVec(a_fsas);
return Intersect(a_fsas_vec, properties_a, b_fsas, properties_b,
treat_epsilons_specially, out, arc_map_a, arc_map_b);
}
if (b_fsas.NumAxes() == 2) {
FsaVec b_fsas_vec = FsaToFsaVec(b_fsas);
return Intersect(a_fsas, properties_a, b_fsas_vec, properties_b,
treat_epsilons_specially, out, arc_map_a, arc_map_b);
}
int32_t num_fsas_a = a_fsas.Dim0(), num_fsas_b = b_fsas.Dim0();
K2_CHECK_GT(num_fsas_a, 0);
K2_CHECK_GT(num_fsas_b, 0);
int32_t stride_a = 1, stride_b = 1;
if (num_fsas_a != num_fsas_b) {
if (num_fsas_a == 1) {
stride_a = 0;
} else if (num_fsas_b == 1) {
stride_b = 0;
} else {
K2_CHECK_EQ(num_fsas_a, num_fsas_b); // this check will fail and report the mismatch.
}
}
if (properties_a < 0) {
Array1<int32_t> properties_a_out(c, num_fsas_a);
GetFsaVecBasicProperties(a_fsas, &properties_a_out, &properties_a);
}
if (properties_b < 0) {
Array1<int32_t> properties_b_out(c, num_fsas_b);
GetFsaVecBasicProperties(b_fsas, &properties_b_out, &properties_b);
}
bool arc_sorted = (properties_a & kFsaPropertiesArcSorted) &&
(properties_b & kFsaPropertiesArcSorted);
K2_CHECK(arc_sorted) << "Both a_fsas and b_fsas should be arc-sorted";
int32_t num_fsas = std::max(num_fsas_a, num_fsas_b);
std::vector<std::unique_ptr<k2host::Intersection>> intersections(num_fsas);
std::vector<k2host::Array2Size<int32_t>> sizes(num_fsas);
for (int32_t i = 0; i < num_fsas; ++i) {
k2host::Fsa host_fsa_a = FsaVecToHostFsa(a_fsas, i * stride_a),
host_fsa_b = FsaVecToHostFsa(b_fsas, i * stride_b);
intersections[i] = std::make_unique<k2host::Intersection>(
host_fsa_a, host_fsa_b, treat_epsilons_specially, false);
intersections[i]->GetSizes(&(sizes[i]));
}
FsaVecCreator creator(sizes);
int32_t num_arcs = creator.NumArcs();
if (arc_map_a) *arc_map_a = Array1<int32_t>(c, num_arcs);
if (arc_map_b) *arc_map_b = Array1<int32_t>(c, num_arcs);
// the following few lines will allow us to add suitable offsets to the
// `arc_map`.
Array1<int32_t> a_fsas_row_splits12 =
a_fsas.RowSplits(2)[a_fsas.RowSplits(1)],
b_fsas_row_splits12 =
b_fsas.RowSplits(2)[b_fsas.RowSplits(1)];
const int32_t *a_fsas_row_splits12_data = a_fsas_row_splits12.Data(),
*b_fsas_row_splits12_data = b_fsas_row_splits12.Data();
bool ok = true;
for (int32_t i = 0; i < num_fsas; ++i) {
k2host::Fsa host_fsa_out = creator.GetHostFsa(i);
int32_t arc_offset = creator.GetArcOffsetFor(i);
int32_t *this_arc_map_a =
(arc_map_a ? arc_map_a->Data() + arc_offset : nullptr),
*this_arc_map_b =
(arc_map_b ? arc_map_b->Data() + arc_offset : nullptr);
bool ans = intersections[i]->GetOutput(&host_fsa_out, this_arc_map_a,
this_arc_map_b);
ok = ok && ans;
int32_t this_num_arcs = creator.GetArcOffsetFor(i + 1) - arc_offset;
if (arc_map_a) {
int32_t arc_offset_a = a_fsas_row_splits12_data[i * stride_a];
for (int32_t i = 0; i < this_num_arcs; i++)
this_arc_map_a[i] += arc_offset_a;
}
if (arc_map_b) {
int32_t arc_offset_b = b_fsas_row_splits12_data[i * stride_b];
for (int32_t i = 0; i < this_num_arcs; i++)
this_arc_map_b[i] += arc_offset_b;
}
}
*out = creator.GetFsaVec();
return ok;
}
// Will be used in RemoveEpsilonHost and Determinize below to process FsaVec
// input recursively.
void RecursionWrapper(void (*f)(FsaOrVec &, FsaOrVec *, Ragged<int32_t> *),
FsaOrVec &src, FsaOrVec *dest,
Ragged<int32_t> *arc_deriv) {
NVTX_RANGE(K2_FUNC);
// src is actually an FsaVec. Just recurse for now.
K2_CHECK_EQ(src.NumAxes(), 3);
int32_t num_fsas = src.shape.Dim0();
std::vector<Fsa> srcs(num_fsas), dests(num_fsas);
std::vector<Ragged<int32_t>> arc_derivs(num_fsas);
int32_t tot_num_arcs = 0;
for (int32_t i = 0; i < num_fsas; ++i) {
srcs[i] = src.Index(0, i);
f(srcs[i], &(dests[i]), arc_deriv != nullptr ? &(arc_derivs[i]) : nullptr);
if (arc_deriv != nullptr) {
// convert arc indexes in arc_derivs from idx2 to idx012
Array1<int32_t> &values = arc_derivs[i].values;
values = Plus(values, tot_num_arcs);
tot_num_arcs += srcs[i].NumElements();
}
}
*dest = Stack(0, num_fsas, dests.data());
if (arc_deriv != nullptr) *arc_deriv = Cat(0, num_fsas, arc_derivs.data());
}
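// Host epsilon removal (CPU), intended for top-sorted, acyclic input (see
// the property check in RemoveEpsilon() below); `arc_derivs`, if requested,
// lists for each output arc the input arcs it was derived from.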
void RemoveEpsilonHost(FsaOrVec &src, FsaOrVec *dest,
Ragged<int32_t> *arc_derivs /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
if (num_axes < 2 || num_axes > 3) {
K2_LOG(FATAL) << "Input has bad num-axes " << num_axes;
} else if (num_axes == 3) {
return RecursionWrapper(RemoveEpsilonHost, src, dest, arc_derivs);
}
k2host::Fsa host_fsa = FsaToHostFsa(src);
int32_t num_states = host_fsa.NumStates();
K2_CHECK_EQ(num_states, src.Dim0());
std::vector<double> max_forward_weights(num_states);
std::vector<double> max_backward_weights(num_states);
k2host::WfsaWithFbWeights max_wfsa(host_fsa, k2host::kMaxWeight,
max_forward_weights.data(),
max_backward_weights.data());
// pass infinity as beam since we don't do pruning here.
float beam = std::numeric_limits<float>::infinity();
k2host::EpsilonsRemoverPrunedMax eps_remover(max_wfsa, beam);
k2host::Array2Size<int32_t> fsa_size, arc_derivs_size;
eps_remover.GetSizes(&fsa_size, &arc_derivs_size);
FsaCreator fsa_creator(fsa_size);
k2host::Fsa host_dest_fsa = fsa_creator.GetHostFsa();
K2_STATIC_ASSERT(
(std::is_same<k2host::MaxTracebackState::DerivType, int32_t>::value));
Ragged2Creator<int32_t> ragged_creator(arc_derivs_size);
k2host::Array2<int32_t *, int32_t> host_arc_derivs =
ragged_creator.GetHostArray2();
eps_remover.GetOutput(&host_dest_fsa, &host_arc_derivs);
*dest = fsa_creator.GetFsa();
if (arc_derivs != nullptr) *arc_derivs = ragged_creator.GetRagged2();
}
void RemoveEpsilon(FsaOrVec &src, int32_t properties,
FsaOrVec *dest,
Ragged<int32_t> *arc_derivs) {
if ((properties & kFsaPropertiesTopSortedAndAcyclic) != 0 &&
src.Context()->GetDeviceType() == kCpu) {
// Host version of the algorithm
RemoveEpsilonHost(src, dest, arc_derivs);
} else {
RemoveEpsilonDevice(src, dest, arc_derivs);
}
}
void RemoveEpsilonAndAddSelfLoops(FsaOrVec &src, int32_t properties,
FsaOrVec *dest,
Ragged<int32_t> *arc_derivs) {
NVTX_RANGE(K2_FUNC);
Ragged<int32_t> arc_derivs1;
FsaOrVec temp;
RemoveEpsilon(src, properties, &temp,
(arc_derivs != nullptr ? &arc_derivs1 : nullptr));
Array1<int32_t> arc_derivs2;
AddEpsilonSelfLoops(temp, dest,
(arc_derivs != nullptr ? &arc_derivs2 : nullptr));
if (arc_derivs != nullptr) {
*arc_derivs = Index(arc_derivs1, 0, arc_derivs2, nullptr);
}
}
void Determinize(FsaOrVec &src, FsaOrVec *dest,
Ragged<int32_t> *arc_derivs /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
if (num_axes < 2 || num_axes > 3) {
K2_LOG(FATAL) << "Input has bad num-axes " << num_axes;
} else if (num_axes == 3) {
return RecursionWrapper(Determinize, src, dest, arc_derivs);
}
k2host::Fsa host_fsa = FsaToHostFsa(src);
int32_t num_states = host_fsa.NumStates();
K2_CHECK_EQ(num_states, src.Dim0());
int32_t max_step = -1; // no limit
k2host::DeterminizerMax determinizer(host_fsa, max_step);
k2host::Array2Size<int32_t> fsa_size, arc_derivs_size;
determinizer.GetSizes(&fsa_size, &arc_derivs_size);
FsaCreator fsa_creator(fsa_size);
k2host::Fsa host_dest_fsa = fsa_creator.GetHostFsa();
K2_STATIC_ASSERT(
(std::is_same<k2host::MaxTracebackState::DerivType, int32_t>::value));
Ragged2Creator<int32_t> ragged_creator(arc_derivs_size);
k2host::Array2<int32_t *, int32_t> host_arc_derivs =
ragged_creator.GetHostArray2();
determinizer.GetOutput(&host_dest_fsa, &host_arc_derivs);
*dest = fsa_creator.GetFsa();
if (arc_derivs != nullptr) *arc_derivs = ragged_creator.GetRagged2();
}
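// Builds a linear (chain) FSA from a symbol sequence: n symbols give n+2
// states and n+1 arcs, arc i going from state i to state i+1 with symbol i,
// and the last arc carrying -1 (the final symbol) with score 0.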
Fsa LinearFsa(const Array1<int32_t> &symbols) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = symbols.Context();
int32_t n = symbols.Dim(), num_states = n + 2, num_arcs = n + 1;
Array1<int32_t> row_splits1 = Range(c, num_states + 1, 0),
row_ids1 = Range(c, num_arcs, 0);
int32_t *row_splits1_data = row_splits1.Data();
Array1<Arc> arcs(c, num_arcs);
Arc *arcs_data = arcs.Data();
const int32_t *symbols_data = symbols.Data();
K2_EVAL(
c, num_arcs, lambda_set_arcs, (int32_t arc_idx01)->void {
int32_t src_state = arc_idx01, dest_state = arc_idx01 + 1,
// -1 == kFinalSymbol
symbol = (arc_idx01 < n ? symbols_data[arc_idx01] : -1);
if (arc_idx01 < n) K2_CHECK_NE(symbol, -1);
float score = 0.0;
arcs_data[arc_idx01] = Arc(src_state, dest_state, symbol, score);
// the final state has no leaving arcs.
if (arc_idx01 == 0) row_splits1_data[num_states] = num_arcs;
});
return Ragged<Arc>(RaggedShape2(&row_splits1, &row_ids1, num_arcs), arcs);
}
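// Batched version of LinearFsa for a ragged array of symbol sequences.
// Sketch of the expected result (illustration only): for symbols
// [ [3 5], [7] ] the output FsaVec holds two linear FSAs, the first with
// arcs 0->1/3, 1->2/5, 2->3/-1 and the second with arcs 0->1/7, 1->2/-1,
// all with score 0.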
FsaVec LinearFsas(const Ragged<int32_t> &symbols) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(symbols.NumAxes(), 2);
ContextPtr &c = symbols.Context();
// if there are n symbols, there are n+2 states and n+1 arcs.
RaggedShape states_shape = ChangeSublistSize(symbols.shape, 2);
int32_t num_states = states_shape.NumElements(),
num_arcs = symbols.NumElements() + symbols.Dim0();
// row_splits2 maps from state_idx01 to arc_idx012; row_ids2 does the reverse.
// We'll set them in the lambda below.
Array1<int32_t> row_splits2(c, num_states + 1), row_ids2(c, num_arcs);
int32_t *row_ids2_data = row_ids2.Data(),
*row_splits2_data = row_splits2.Data();
const int32_t *row_ids1_data = states_shape.RowIds(1).Data(),
*row_splits1_data = states_shape.RowSplits(1).Data(),
*symbols_data = symbols.values.Data();
Array1<Arc> arcs(c, num_arcs);
Arc *arcs_data = arcs.Data();
K2_EVAL(
c, num_states, lambda, (int32_t state_idx01)->void {
int32_t fsa_idx0 = row_ids1_data[state_idx01],
state_idx0x = row_splits1_data[fsa_idx0],
next_state_idx0x = row_splits1_data[fsa_idx0 + 1],
idx1 = state_idx01 - state_idx0x;
// the following works because each FSA has one fewer arc than states.
int32_t arc_idx0xx = state_idx0x - fsa_idx0,
next_arc_idx0xx = next_state_idx0x - (fsa_idx0 + 1),
// the following may look a bit wrong.. here, the idx1 is the
// same as the idx12 if the arc exists, because each state has
// one arc leaving it (except the last state).
arc_idx012 = arc_idx0xx + idx1;
// the following works because each FSA has one fewer symbol than arcs
// (however it doesn't work for the last arc of each FSA; we check
// below.)
int32_t symbol_idx01 = arc_idx012 - fsa_idx0;
if (arc_idx012 < next_arc_idx0xx) {
int32_t src_state = idx1, dest_state = idx1 + 1,
symbol = (arc_idx012 + 1 < next_arc_idx0xx
? symbols_data[symbol_idx01]
: -1); // kFinalSymbol
float score = 0.0;
arcs_data[arc_idx012] = Arc(src_state, dest_state, symbol, score);
row_ids2_data[arc_idx012] = state_idx01;
} else {
// The following ensures that the last element of row_splits2_data
// (i.e. row_splits2[num_states]) is set to num_arcs. It also writes
// something unnecessary for the last state of each FSA but the last
// one, which will cause 2 threads to write the same item to the same
// location. Note that there is no arc with index `arc_idx012`, if you
// reach here.
row_splits2_data[state_idx01 + 1] = arc_idx012;
}
row_splits2_data[state_idx01] = arc_idx012;
});
return Ragged<Arc>(
RaggedShape3(&states_shape.RowSplits(1), &states_shape.RowIds(1),
num_states, &row_splits2, &row_ids2, num_arcs),
arcs);
}
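// In-place and copying arc sort: sorts the arcs leaving each state (via
// SortSublists); the copying version also fills `arc_map`, mapping each arc
// of `dest` to its position in `src`.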
void ArcSort(Fsa *fsa) {
if (fsa->NumAxes() < 2) return; // it is empty
SortSublists<Arc>(fsa);
}
void ArcSort(Fsa &src, Fsa *dest, Array1<int32_t> *arc_map /*= nullptr*/) {
NVTX_RANGE(K2_FUNC);
if (!src.values.IsValid()) return;
if (arc_map != nullptr)
*arc_map = Array1<int32_t>(src.Context(), src.NumElements());
Fsa tmp(src.shape, src.values.Clone());
SortSublists<Arc>(&tmp, arc_map);
*dest = tmp;
}
// TODO(fangjun): use the following method suggested by Dan
//
// ... incidentally, it's possible to further optimize this so the run
// time is less than linear, by using methods similar to what I use
// in GetStateBatches(); imagine computing a table that instead of
// the best traceback, is the best 2-step traceback; and then the 4-step
// traceback, and so on. There's no need for this right now, since the
// forward-pass algorithm is already at least linear-time in the length
// of this path. But we can consider it for the future.
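// Traces back the best path of each FSA using `entering_arcs` (the best
// entering arc per state, e.g. from the forward pass), starting from each
// final state, and returns the arc indexes of each best path in order as a
// ragged array with one row per FSA (an empty row for an empty FSA).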
Ragged<int32_t> ShortestPath(FsaVec &fsas,
const Array1<int32_t> &entering_arcs) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(fsas.NumAxes(), 3);
const int32_t *entering_arcs_data = entering_arcs.Data();
const Arc *arcs_data = fsas.values.Data();
int32_t num_fsas = fsas.Dim0();
int32_t num_states = fsas.TotSize(1);
ContextPtr &context = fsas.Context();
// allocate an extra element for ExclusiveSum
Array1<int32_t> num_best_arcs_per_fsa(context, num_fsas + 1);
int32_t *num_best_arcs_per_fsa_data = num_best_arcs_per_fsa.Data();
const int32_t *row_splits1_data = fsas.RowSplits(1).Data();
// -1 represents an invalid arc_index.
// This extra array avoids an extra iteration over `entering_arcs`.
Array1<int32_t> state_best_arc_index_array(context, num_states, -1);
int32_t *state_best_arc_index_array_data = state_best_arc_index_array.Data();
K2_EVAL(
context, num_fsas, lambda_set_num_best_arcs, (int32_t fsas_idx0) {
int32_t state_idx01 = row_splits1_data[fsas_idx0];
int32_t state_idx01_next = row_splits1_data[fsas_idx0 + 1];
if (state_idx01_next == state_idx01) {
// this fsa is empty, so there is no best path available
num_best_arcs_per_fsa_data[fsas_idx0] = 0;
return;
}
int32_t final_state_idx01 = state_idx01_next - 1;
int32_t cur_state = final_state_idx01;
int32_t cur_index = entering_arcs_data[cur_state];
int32_t num_arcs = 0;
int32_t *p = state_best_arc_index_array_data + final_state_idx01;
while (cur_index != -1) {
*p = cur_index;
--p;
cur_state = arcs_data[cur_index].src_state + state_idx01;
cur_index = entering_arcs_data[cur_state];
++num_arcs;
}
num_best_arcs_per_fsa_data[fsas_idx0] = num_arcs;
});
ExclusiveSum(num_best_arcs_per_fsa, &num_best_arcs_per_fsa);
RaggedShape shape = RaggedShape2(&num_best_arcs_per_fsa, nullptr, -1);
const int32_t *shape_row_splits1_data = shape.RowSplits(1).Data();
const int32_t *shape_row_ids1_data = shape.RowIds(1).Data();
const int32_t *ans_row_splits_data = shape.RowSplits(1).Data();
Array1<int32_t> best_path_arc_indexes(context, shape.NumElements());
int32_t *best_path_arc_indexes_data = best_path_arc_indexes.Data();
K2_EVAL(
context, shape.NumElements(), lambda_set_best_arcs, (int32_t ans_idx01) {
int32_t fsa_idx0 = shape_row_ids1_data[ans_idx01];
int32_t ans_idx0x = shape_row_splits1_data[fsa_idx0];
int32_t ans_idx1 = ans_idx01 - ans_idx0x;
int32_t num_arcs_this_fsa = num_best_arcs_per_fsa_data[fsa_idx0 + 1] -
num_best_arcs_per_fsa_data[fsa_idx0];
if (num_arcs_this_fsa == 0) return;
int32_t final_state_idx01_this_fsa = row_splits1_data[fsa_idx0 + 1] - 1;
const int32_t *p_start = state_best_arc_index_array_data +
final_state_idx01_this_fsa -
num_arcs_this_fsa + 1;
best_path_arc_indexes_data[ans_idx01] = p_start[ans_idx1];
});
Ragged<int32_t> ans(shape, best_path_arc_indexes);
return ans;
}
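// Adds an epsilon (label 0, score 0) self-loop as the first arc of every
// non-final state; existing arcs are shifted after it. Entries of `arc_map`
// for the newly added self-loops are set to -1.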
void AddEpsilonSelfLoops(FsaOrVec &src, FsaOrVec *dest,
Array1<int32_t> *arc_map /*= nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
const int32_t *old_row_splits1_data = src.RowSplits(1).Data(),
*old_row_ids1_data = src.RowIds(1).Data();
const Arc *old_arcs_data = src.values.Data();
if (src.NumAxes() == 2) {
int32_t num_states = src.Dim0();
if (num_states < 2) {
K2_CHECK_EQ(num_states, 0);
*dest = src;
if (arc_map != nullptr) *arc_map = Array1<int32_t>(c, 0);
return;
}
int32_t old_num_arcs = src.TotSize(1),
new_num_arcs = old_num_arcs + (num_states - 1);
Array1<int32_t> new_row_splits(c, num_states + 1),
new_row_ids(c, new_num_arcs);
Array1<Arc> new_arcs(c, new_num_arcs);
int32_t *new_row_splits1_data = new_row_splits.Data(),
*new_row_ids1_data = new_row_ids.Data();
Arc *new_arcs_data = new_arcs.Data();
int32_t *arc_map_data = nullptr;
if (arc_map) {
*arc_map = Array1<int32_t>(c, new_num_arcs);
arc_map_data = arc_map->Data();
}
ParallelRunner pr(c);
{
With w(pr.NewStream());
K2_EVAL(
c, old_num_arcs, lambda_copy_data, (int32_t arc_idx01)->void {
int32_t state_idx0 = old_row_ids1_data[arc_idx01],
new_arc_idx01 = arc_idx01 + 1 + state_idx0;
// the "+1" above is because we put the self-loop first.
new_row_ids1_data[new_arc_idx01] = state_idx0;
new_arcs_data[new_arc_idx01] = old_arcs_data[arc_idx01];
if (arc_map_data) arc_map_data[new_arc_idx01] = arc_idx01;
});
}
{
With w(pr.NewStream());
K2_EVAL(
c, num_states, lambda_set_new_data, (int32_t state_idx0)->void {
int32_t old_arc_idx0x = old_row_splits1_data[state_idx0],
new_arc_idx0x = old_arc_idx0x + state_idx0;
new_row_splits1_data[state_idx0] = new_arc_idx0x;
if (state_idx0 + 1 < num_states) { // not final-state
int32_t new_arc_idx01 = new_arc_idx0x; // the 1st arc is the loop
new_row_ids1_data[new_arc_idx01] = state_idx0;
new_arcs_data[new_arc_idx01] =
Arc(state_idx0, state_idx0, 0, 0.0);
if (arc_map_data) arc_map_data[new_arc_idx01] = -1;
} else {
// Note: if num_states was zero we would have returned above, so
// we don't have to worry about empty FSAs.
new_row_splits1_data[num_states] = new_arc_idx0x;
}
});
}
pr.Finish();
*dest = Ragged<Arc>(
RaggedShape2(&new_row_splits, &new_row_ids, new_num_arcs), new_arcs);
} else {
K2_CHECK_EQ(src.NumAxes(), 3);
// Get a vector saying, for each FSA, whether it's nonempty.
int32_t num_fsas = src.Dim0(), num_states = src.TotSize(1),
old_num_arcs = src.TotSize(2);
if (num_states == 0) {
*dest = src;
if (arc_map) *arc_map = Array1<int32_t>(c, 0);
return;
}
Array1<int32_t> fsa_nonempty(c, num_fsas + 1);
int32_t *fsa_nonempty_data = fsa_nonempty.Data();
K2_EVAL(
c, num_fsas, lambda_set_fsa_nonempty, (int32_t fsa_idx0)->void {
fsa_nonempty_data[fsa_idx0] = (old_row_splits1_data[fsa_idx0 + 1] >
old_row_splits1_data[fsa_idx0]);
});
ExclusiveSum(fsa_nonempty, &fsa_nonempty);
const int32_t *old_row_splits2_data = src.RowSplits(2).Data(),
*old_row_ids2_data = src.RowIds(2).Data();
int32_t num_nonempty_fsas = fsa_nonempty.Back(),
new_num_arcs = old_num_arcs + num_states - num_nonempty_fsas;
// we subtract `num_nonempty_fsas` because final-states don't get a
// self-loop.
Array1<int32_t> new_row_splits2(c, num_states + 1),
new_row_ids2(c, new_num_arcs);
Array1<Arc> new_arcs(c, new_num_arcs);
// fsa_idx0_mod_data maps from fsa_idx0 to a modified fsa_idx0 that
// "doesn't count" FSAs with zero states.
const int32_t *fsa_idx0_mod_data = fsa_nonempty_data;
int32_t *new_row_splits2_data = new_row_splits2.Data(),
*new_row_ids2_data = new_row_ids2.Data();
Arc *new_arcs_data = new_arcs.Data();
int32_t *arc_map_data = nullptr;
if (arc_map) {
*arc_map = Array1<int32_t>(c, new_num_arcs);
arc_map_data = arc_map->Data();
}
ParallelRunner pr(c);
{
With w(pr.NewStream());
K2_EVAL(
c, old_num_arcs, lambda_copy_data, (int32_t arc_idx012)->void {
int32_t state_idx01 = old_row_ids2_data[arc_idx012],
fsa_idx0 = old_row_ids1_data[state_idx01],
fsa_idx0_mod = fsa_idx0_mod_data[fsa_idx0],
new_arc_idx012 =
arc_idx012 + 1 + state_idx01 - fsa_idx0_mod;
// The "+1" above is because we put the self-loop first. The
// "-fsa_idx0_mod" is because final-states don't get a self-loop.
new_row_ids2_data[new_arc_idx012] = state_idx01;
new_arcs_data[new_arc_idx012] = old_arcs_data[arc_idx012];
if (arc_map_data) arc_map_data[new_arc_idx012] = arc_idx012;
});
}
{
With w(pr.NewStream());
K2_EVAL(
c, num_states, lambda_set_new_data, (int32_t state_idx01)->void {
int32_t fsa_idx0 = old_row_ids1_data[state_idx01],
fsa_idx0_mod = fsa_idx0_mod_data[fsa_idx0],
state_idx0x = old_row_splits1_data[fsa_idx0],
next_state_idx0x = old_row_splits1_data[fsa_idx0 + 1],
old_arc_idx01x = old_row_splits2_data[state_idx01];
// Below the "+ state_idx01" is because each state gets a self-loop,
// and the "- fsa_idx0_mod" is because final-states don't get a
// self-loop.
int32_t new_arc_idx01x =
old_arc_idx01x + state_idx01 - fsa_idx0_mod;
// The self-loop arc is the first arc:
int32_t new_arc_idx012 = new_arc_idx01x;
new_row_splits2_data[state_idx01] = new_arc_idx01x;
if (state_idx01 + 1 < next_state_idx0x) { // not final-state
new_row_ids2_data[new_arc_idx012] = state_idx01;
int32_t state_idx1 = state_idx01 - state_idx0x;
new_arcs_data[new_arc_idx012] =
Arc(state_idx1, state_idx1, 0, 0.0);
if (arc_map_data) arc_map_data[new_arc_idx012] = -1;
} else if (state_idx01 + 1 == num_states) {
// Note: if num_states was zero we would have returned above, so
// we don't have to worry about an empty FsaVec.
new_row_splits2_data[num_states] = new_arc_idx01x;
}
});
}
pr.Finish();
*dest =
Ragged<Arc>(RaggedShape3(&src.RowSplits(1), &src.RowIds(1), num_states,
&new_row_splits2, &new_row_ids2, new_num_arcs),
new_arcs);
}
}
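// Union of all FSAs in `fsas`: a new start state gets an epsilon arc to each
// component's start state, every component's final state is removed, and
// arcs that entered a component's final state are redirected to a single new
// final state. `arc_map` entries for the newly added arcs are -1.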
Fsa Union(FsaVec &fsas, Array1<int32_t> *arc_map /*= nullptr*/) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(fsas.NumAxes(), 3);
ContextPtr &context = fsas.Context();
const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data();
const int32_t *fsas_row_splits2_data = fsas.RowSplits(2).Data();
const int32_t *fsas_row_ids1_data = fsas.RowIds(1).Data();
const int32_t *fsas_row_ids2_data = fsas.RowIds(2).Data();
const Arc *arcs_data = fsas.values.Data();
int32_t num_fsas = fsas.Dim0();
int32_t num_states = fsas.TotSize(1);
int32_t num_arcs = fsas.TotSize(2);
// A new start state and a new final state are added (+2).
// The final state of each fsa is removed (-num_fsas)
int32_t num_out_states = num_states + 2 - num_fsas;
int32_t out_final_state = num_out_states - 1;
// For every fsa, a new arc is added from the new start state
// to its original start state (+num_fsas)
int32_t num_out_arcs = num_arcs + num_fsas;
Array1<int32_t> out_row_ids(context, num_out_arcs);
Array1<Arc> out_arcs(context, num_out_arcs);
Array1<int32_t> tmp_arc_map(context, num_out_arcs, -1);
int32_t *tmp_arc_map_data = tmp_arc_map.Data();
int32_t *out_row_ids_data = out_row_ids.Data();
Arc *out_arcs_data = out_arcs.Data();
K2_EVAL(
context, num_arcs, lambda_set_out, (int32_t fsas_arc_idx012) {
int32_t fsas_state_idx01 = fsas_row_ids2_data[fsas_arc_idx012];
int32_t fsas_idx0 = fsas_row_ids1_data[fsas_state_idx01];
int32_t this_fsa_final_state_idx01 =
fsas_row_splits1_data[fsas_idx0 + 1] - 1;
K2_DCHECK_GT(this_fsa_final_state_idx01, fsas_state_idx01)
<< "We support only FSAs with at least two states at present";
int32_t fsas_state_idx0x = fsas_row_splits1_data[fsas_idx0];
int32_t fsas_state_idx1 = fsas_state_idx01 - fsas_state_idx0x;
int32_t this_fsa_final_state_idx1 =
this_fsa_final_state_idx01 - fsas_state_idx0x;
int32_t fsas_arc_idx0xx = fsas_row_splits2_data[fsas_state_idx0x];
// fsa0: +1 (a new start state)
// fsa1: +0 (the final state of fsa0 is removed)
// fsa2: -1 (the final state of fsa1 is removed)
// fsa3: -2 (the final state of fsa2 is removed)
int32_t state_offset = 1 - fsas_idx0;
int32_t out_state_idx0 = fsas_state_idx01 + state_offset;
int32_t out_arc_idx01 = fsas_arc_idx012 + num_fsas;
out_row_ids_data[out_arc_idx01] = out_state_idx0;
Arc arc = arcs_data[fsas_arc_idx012];
K2_DCHECK_EQ(arc.src_state, fsas_state_idx1);
if (arc.dest_state == this_fsa_final_state_idx1)
arc.dest_state = out_final_state;
else
arc.dest_state = arc.dest_state - arc.src_state + out_state_idx0;
arc.src_state = out_state_idx0;
out_arcs_data[out_arc_idx01] = arc;
tmp_arc_map_data[out_arc_idx01] = fsas_arc_idx012;
if (fsas_arc_idx0xx == fsas_arc_idx012) {
// add a new arc from the new start state to the start state
// of this fsa
//
// WARNING: we cannot use fsas_state_idx01 here
// since the start state may have no leaving arcs!
Arc arc(0, fsas_state_idx0x + state_offset, 0, 0);
out_arcs_data[fsas_idx0] = arc;
out_row_ids_data[fsas_idx0] = 0;
}
});
if (arc_map != nullptr) *arc_map = std::move(tmp_arc_map);
Array1<int32_t> out_row_splits(context, num_out_states + 1);
RowIdsToRowSplits(out_row_ids, &out_row_splits);
RaggedShape shape = RaggedShape2(&out_row_splits, &out_row_ids, num_out_arcs);
Fsa ans = Ragged<Arc>(shape, out_arcs);
return ans;
}
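// Kleene closure of a single FSA: arcs that entered the final state are
// redirected to the start state with their label changed from -1 to 0, and a
// new arc (start -> final, label -1) is added so that the empty string is
// accepted; its `arc_map` entry is -1.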
Fsa Closure(Fsa &fsa, Array1<int32_t> *arc_map /* = nullptr*/) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(fsa.NumAxes(), 2) << "We support only a single FSA.";
ContextPtr &c = fsa.Context();
int32_t num_states = fsa.Dim0();
if (num_states < 2) {
K2_CHECK_EQ(num_states, 0)
<< "An empty fsa should contain no states at all";
if (arc_map != nullptr) *arc_map = Array1<int32_t>(c, 0);
return fsa; // return itself if the input fsa is empty
}
const int32_t *fsa_row_splits_data = fsa.RowSplits(1).Data();
const int32_t *fsa_row_ids_data = fsa.RowIds(1).Data();
const Arc *fsa_arcs_data = fsa.values.Data();
int32_t fsa_final_state = num_states - 1;
int32_t num_out_states = num_states;
// An arc from the start state to the final state with label == -1 is added.
int32_t num_out_arcs = fsa.values.Dim() + 1;
Array1<int32_t> out_row_ids(c, num_out_arcs);
int32_t *out_row_ids_data = out_row_ids.Data();
Array1<Arc> out_arcs(c, num_out_arcs);
Arc *out_arcs_data = out_arcs.Data();
Array1<int32_t> tmp_arc_map(c, num_out_arcs);
int32_t *tmp_arc_map_data = tmp_arc_map.Data();
K2_EVAL(
c, fsa.values.Dim(), lambda_set_arcs, (int32_t fsa_arc_idx01) {
int32_t fsa_state_idx0 = fsa_row_ids_data[fsa_arc_idx01];
int32_t fsa_arc_idx0x = fsa_row_splits_data[fsa_state_idx0];
int32_t fsa_arc_idx1 = fsa_arc_idx01 - fsa_arc_idx0x;
int32_t this_state_num_arcs =
fsa_row_splits_data[fsa_state_idx0 + 1] - fsa_arc_idx0x;
Arc arc = fsa_arcs_data[fsa_arc_idx01];
if (arc.dest_state == fsa_final_state) {
// modify arcs entering the final state such that:
// - dest_state == 0
// - label == 0
arc.dest_state = 0;
K2_DCHECK_EQ(arc.label, -1);
arc.label = 0;
}
int out_arc_idx01;
if (arc.src_state > 0) {
// this arc is not originated from the start state, so its index is
// incremented
out_arc_idx01 = fsa_arc_idx01 + 1;
} else {
out_arc_idx01 = fsa_arc_idx01;
if (fsa_arc_idx1 == this_state_num_arcs - 1) {
// This is the last arc of the original start state,
// so we add a new arc just after it.
Arc new_arc(0, fsa_final_state, -1, 0.0f);
out_arcs_data[out_arc_idx01 + 1] = new_arc;
out_row_ids_data[out_arc_idx01 + 1] = 0;
tmp_arc_map_data[out_arc_idx01 + 1] = -1;
}
}
// it may happen that the start state has no leaving arcs
if (fsa_row_splits_data[1] == 0) {
Arc new_arc(0, fsa_final_state, -1, 0.0f);
out_arcs_data[0] = new_arc;
out_row_ids_data[0] = 0;
tmp_arc_map_data[0] = -1;
}
tmp_arc_map_data[out_arc_idx01] = fsa_arc_idx01;
out_arcs_data[out_arc_idx01] = arc;
out_row_ids_data[out_arc_idx01] = arc.src_state;
});
if (arc_map != nullptr) *arc_map = std::move(tmp_arc_map);
Array1<int32_t> out_row_splits(c, num_out_states + 1);
int32_t *out_row_splits_data = out_row_splits.Data();
K2_EVAL(
c, out_row_splits.Dim(), lambda_set_row_splits, (int32_t i) {
if (i == 0)
out_row_splits_data[i] = 0;
else
out_row_splits_data[i] = fsa_row_splits_data[i] + 1;
});
RaggedShape shape = RaggedShape2(&out_row_splits, &out_row_ids, num_out_arcs);
Fsa ans = Ragged<Arc>(shape, out_arcs);
return ans;
}
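// Replaces each arc of `fsas` with a chain of arcs, one per element of the
// corresponding row of `labels_shape` (an arc with an empty row stays a
// single arc). Newly created intermediate states each have exactly one
// leaving arc; `fsas_arc_map` points to the original arc only for the first
// arc of each chain, and `labels_arc_map` points to the label consumed by
// each new arc (or -1).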
FsaOrVec ExpandArcs(FsaOrVec &fsas, RaggedShape &labels_shape,
Array1<int32_t> *fsas_arc_map /*=nullptr*/,
Array1<int32_t> *labels_arc_map /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
if (fsas.NumAxes() == 2) {
FsaVec fsas_temp = FsaToFsaVec(fsas);
return ExpandArcs(fsas_temp, labels_shape, fsas_arc_map, labels_arc_map)
.RemoveAxis(0);
}
K2_CHECK_EQ(fsas.NumAxes(), 3);
K2_CHECK_EQ(labels_shape.NumAxes(), 2);
K2_CHECK_EQ(fsas.NumElements(), labels_shape.Dim0());
ContextPtr &c = fsas.Context();
K2_CHECK(c->IsCompatible(*labels_shape.Context()));
RaggedShape state_to_arcs = GetLayer(fsas.shape, 1);
// `state_to_foo` is a RaggedShape that, for each state in `fsas`, has a list
// of length `num_arcs + 1`, where `num_arcs` is the number of arcs leaving
// this state in `fsas`. Interpret this as: one element for the state
// itself, then one for each arc leaving it. This `foo` is an index that
// corresponds to num-arcs plus one, but because it is really a placeholder
// and we want to keep it distinct from other things, we call it `foo`.
RaggedShape state_to_foo = ChangeSublistSize(state_to_arcs, 1);
int32_t foo_size = state_to_foo.NumElements();
// For each element of `state_to_foo`, `num_ostates_for` says how many states
// there will be for this (state,foo) in the returned (output) FSA. Here, the
// idx0 is the state, the idx1 is foo. If idx1 == 0 (interpret this as "the
// state itself"), then `num_ostates_for[idx01] = 1`, meaning "keep the
// original state". Otherwise, idx1 - 1 represents an arc_idx2 [into `fsas`],
// and we set `num_ostates_for[idx01] = max(0, seq_len-1)`, where seq_len is
// the length of the sequence in `labels_shape` corresponding to this
// arc-index.
Array1<int32_t> num_ostates_for(c, foo_size + 1);
int32_t *num_ostates_for_data = num_ostates_for.Data();
const int32_t *labels_row_splits1_data = labels_shape.RowSplits(1).Data(),
*fsas_row_splits2_data = fsas.RowSplits(2).Data(),
*state_to_foo_row_splits1_data =
state_to_foo.RowSplits(1).Data(),
*state_to_foo_row_ids1_data = state_to_foo.RowIds(1).Data();
K2_EVAL(
c, foo_size, lambda_set_num_ostates, (int32_t idx01)->void {
// note: the idx01, idx0, idx0x are into `state_to_foo`.
// This idx0 is a state-index into `fsas` (an idx01 w.r.t. `fsas`).
int32_t idx0 = state_to_foo_row_ids1_data[idx01],
idx0x = state_to_foo_row_splits1_data[idx0],
idx1 = idx01 - idx0x; // idx1 is `foo`.
int32_t num_ostates;
if (idx1 == 0) {
num_ostates = 1; // this is a copy of the original state.
} else {
int32_t fsas_arc_idx2 = idx1 - 1, fsas_state_idx01 = idx0,
fsas_arc_idx01x = fsas_row_splits2_data[fsas_state_idx01],
fsas_arc_idx012 = fsas_arc_idx01x + fsas_arc_idx2,
labels_shape_idx0 = fsas_arc_idx012,
labels_shape_idx0x =
labels_row_splits1_data[labels_shape_idx0],
labels_shape_idx0x_next =
labels_row_splits1_data[labels_shape_idx0 + 1],
labels_shape_len1 =
labels_shape_idx0x_next - labels_shape_idx0x;
// A sequence of n symbols will require n-1 extra states to represent
// it.
num_ostates = max(labels_shape_len1 - 1, (int32_t)0);
}
num_ostates_for_data[idx01] = num_ostates;
});
ExclusiveSum(num_ostates_for, &num_ostates_for);
Array1<int32_t> &foo_to_ostates_row_splits = num_ostates_for;
RaggedShape foo_to_ostates =
RaggedShape2(&foo_to_ostates_row_splits, nullptr, -1);
// to_ostates_shape has 4 axes: [fsa_id][orig_state][foo][ostate]
// where foo is a general-purpose index that ranges over the (num_arcs + 1) of
// the original state.
RaggedShape to_ostates_shape = ComposeRaggedShapes3(
GetLayer(fsas.shape, 0), state_to_foo, foo_to_ostates);
// Below, `tos` means `to_ostates_shape`.
const int32_t *tos_row_splits1_data = to_ostates_shape.RowSplits(1).Data(),
*tos_row_ids1_data = to_ostates_shape.RowIds(1).Data(),
*tos_row_splits2_data = to_ostates_shape.RowSplits(2).Data(),
*tos_row_ids2_data = to_ostates_shape.RowIds(2).Data(),
*tos_row_splits3_data = to_ostates_shape.RowSplits(3).Data(),
*tos_row_ids3_data = to_ostates_shape.RowIds(3).Data();
// `num_oarcs` gives the number of arcs in the returned (output) FSA for each
// `ostate` (i.e. leaving each state in the returned FSA).
int32_t tot_ostates = to_ostates_shape.NumElements();
Array1<int32_t> num_oarcs(c, tot_ostates + 1);
int32_t *num_oarcs_data = num_oarcs.Data();
K2_EVAL(
c, tot_ostates, lambda_set_num_oarcs, (int32_t idx0123)->void {
// All these indexes are into `to_ostates_shape`, indexed
// `[fsa][state][foo][ostate].`
int32_t idx012 = tos_row_ids3_data[idx0123],
idx012x = tos_row_splits3_data[idx012],
idx01 = tos_row_ids2_data[idx012],
idx01x = tos_row_splits2_data[idx01],
idx01x_next = tos_row_splits2_data[idx01 + 1],
len2 = idx01x_next - idx01x, idx2 = idx012 - idx01x,
idx3 = idx0123 - idx012x;
int32_t num_arcs;
if (idx2 == 0) {
K2_CHECK_EQ(idx3, 0);
// This ostate corresponds to the original state; it is not one of the
// extra states added to support chains of arcs.
// The original state had `orig_num_arcs` leaving it, which is the
// number of `foo` indexes minus one.
int32_t orig_num_arcs = len2 - 1;
num_arcs = orig_num_arcs;
} else {
// All newly-created states have exactly one arc leaving them.
num_arcs = 1;
}
num_oarcs_data[idx0123] = num_arcs;
});
ExclusiveSum(num_oarcs, &num_oarcs);
Array1<int32_t> &ostate_to_oarcs_row_splits = num_oarcs;
RaggedShape ostate_to_oarcs =
RaggedShape2(&ostate_to_oarcs_row_splits, nullptr, -1);
// `full_shape` has 5 axes: [fsa][orig_state][foo][ostate][oarc]
RaggedShape full_shape =
ComposeRaggedShapes(to_ostates_shape, ostate_to_oarcs);
// for the lower-order row-splits and row-ids, use tot_row_{splits,idx}n_data
const int32_t *full_row_splits4_data = full_shape.RowSplits(4).Data(),
*full_row_ids4_data = full_shape.RowIds(4).Data();
int32_t tot_oarcs = full_shape.NumElements();
K2_CHECK_GE(tot_oarcs, fsas.NumElements());
int32_t *fsas_arc_map_data = nullptr, *labels_arc_map_data = nullptr;
if (fsas_arc_map) {
*fsas_arc_map = Array1<int32_t>(c, tot_oarcs);
fsas_arc_map_data = fsas_arc_map->Data();
}
if (labels_arc_map) {
*labels_arc_map = Array1<int32_t>(c, tot_oarcs);
labels_arc_map_data = labels_arc_map->Data();
}
Array1<Arc> oarcs(c, tot_oarcs);
Arc *oarcs_data = oarcs.Data();
const Arc *arcs_data = fsas.values.Data();
K2_EVAL(
c, tot_oarcs, lambda_set_arcs, (int32_t idx01234)->void {
// All these indexes are into `full_shape`, indexed
// `[fsa][state][foo][ostate][oarc].`
int32_t idx0123 = full_row_ids4_data[idx01234],
idx0123x = full_row_splits4_data[idx0123],
idx4 = idx01234 - idx0123x, idx012 = tos_row_ids3_data[idx0123],
idx012x = tos_row_splits3_data[idx012],
idx3 = idx0123 - idx012x, idx01 = tos_row_ids2_data[idx012],
idx01x = tos_row_splits2_data[idx01], idx2 = idx012 - idx01x,
idx0 = tos_row_ids1_data[idx01],
idx0x = tos_row_splits1_data[idx0],
idx0xxx = tos_row_splits3_data[tos_row_splits2_data[idx0x]];
int32_t fsa_idx01x = fsas_row_splits2_data[idx01];
int32_t fsa_idx2; // the idx2 (arc-index) into `fsas` of the input arc
// that's most relevant to us..
int32_t seq_pos; // seq_pos is our index into the sequence of arcs that
// we produce for each original arc
if (idx2 == 0) {
K2_CHECK_EQ(idx3, 0);
fsa_idx2 = idx4; // corresponds to foo=0, so idx3 will be 0; the idx4
// enumerates the arcs leaving it..
seq_pos = 0;
} else {
// this is one of the extra `foo` indexes, one per arc in the input
// FSA that leaves this state; each of those `foo` indexes has
// (seq_len - 1) states in it (idx3=0,1..seq_len-1); and each state
// has one arc leaving it (idx4==0).
K2_CHECK_EQ(idx4, 0);
fsa_idx2 = idx2 - 1;
seq_pos = idx3 + 1;
}
int32_t fsa_idx012 = fsa_idx01x + fsa_idx2; // index of the arc in the
// source FSA that
// we're expanding.
Arc iarc = arcs_data[fsa_idx012];
int32_t labels_idx0x = labels_row_splits1_data[fsa_idx012],
labels_next_idx0x = labels_row_splits1_data[fsa_idx012 + 1],
labels_len1 = labels_next_idx0x - labels_idx0x;
// labels_len1 is length of label sequence for this arc
K2_CHECK_LT(seq_pos, max(int32_t(1), labels_len1));
int32_t dest_idx01 = idx0x + iarc.dest_state, // original destination
// state-index
orig_dest_idx0123 =
tos_row_splits3_data[tos_row_splits2_data[dest_idx01]];
Arc oarc;
oarc.src_state = idx0123 - idx0xxx;
// If this is the last arc in the sequence, the dest-state is the
// original dest-state of the arc. Otherwise the dest-state is one of
// the new states that we created. The idx123 will be an idx1 after
// removing axes.
int32_t dest_idx123;
if (seq_pos + 1 >= labels_len1) { // last arc in sequence..
dest_idx123 = orig_dest_idx0123 - idx0xxx;
} else {
int32_t dest_state_idx2 = fsa_idx2 + 1, // index `foo` equals
// orig_arc_idx+1
dest_state_idx3 = seq_pos, // ostate index..
dest_idx012 = idx01x + dest_state_idx2,
dest_idx012x = tos_row_splits3_data[dest_idx012],
dest_idx0123 = dest_idx012x + dest_state_idx3;
dest_idx123 = dest_idx0123 - idx0xxx;
}
oarc.dest_state = dest_idx123; // indexes 1,2,3 will be combined; in
// the output FSA it will be an idx1.
if (fsas_arc_map_data)
fsas_arc_map_data[idx01234] = (seq_pos == 0 ? fsa_idx012 : -1);
if (labels_arc_map_data)
labels_arc_map_data[idx01234] =
(seq_pos < labels_len1 ? labels_idx0x + seq_pos : -1);
if (iarc.label != -1) {
// normal case.. label goes on 1st arc in sequence
oarc.label = (seq_pos == 0 ? iarc.label : 0);
} else {
// If the arc was to the final-state, we need to keep the label on the
// last arc of the sequence to keep the output valid. The following
// would be "seq_pos + 1 == labels_len1 ? -1 : 0", but we make it ">="
// not "=" to account for the case seq_pos=0, labels_len1 = 0.
oarc.label = (seq_pos + 1 >= labels_len1 ? -1 : 0);
}
oarc.score = (seq_pos == 0 ? iarc.score : 0.0);
oarcs_data[idx01234] = oarc;
});
// remove current axes 1 and 2... [after removing axis 1, old axis 2 becomes
// axis 1, so remove axis 1 twice].
RaggedShape temp = RemoveAxis(full_shape, 1);
return FsaVec(RemoveAxis(temp, 1), oarcs);
}
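// Swaps labels and aux_labels of `src`. Because an arc's aux-label sequence
// may contain more than one symbol, arcs are first expanded with ExpandArcs
// so that each output arc carries at most one symbol; epsilons are then
// dropped from the output ragged `dest_aux_labels`.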
void Invert(FsaOrVec &src, Ragged<int32_t> &src_aux_labels, FsaOrVec *dest,
Ragged<int32_t> *dest_aux_labels,
Array1<int32_t> *arc_map /*= nullptr*/) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src_aux_labels.NumAxes(), 2);
K2_CHECK_EQ(src_aux_labels.Dim0(), src.NumElements());
K2_CHECK(dest != nullptr && dest_aux_labels != nullptr);
ContextPtr c = GetContext(src, src_aux_labels);
if (src.NumAxes() == 2) {
Fsa *srcs = &src;
FsaVec src_vec = CreateFsaVec(1, &srcs), dest_vec;
Invert(src_vec, src_aux_labels, &dest_vec, dest_aux_labels, arc_map);
*dest = GetFsaVecElement(dest_vec, 0);
return;
}
Array1<int32_t> src_arc_map, labels_arc_map;
*dest = ExpandArcs(src, src_aux_labels.shape, &src_arc_map, &labels_arc_map);
// swap labels and aux_labels
int32_t dest_num_arcs = dest->NumElements();
Arc *dest_arcs_data = dest->values.Data();
const int32_t *labels_arc_map_data = labels_arc_map.Data(),
*src_aux_labels_data = src_aux_labels.values.Data();
Array1<int32_t> dest_aux_labels_row_splits(c, dest_num_arcs + 1);
int32_t *dest_aux_labels_row_splits_data = dest_aux_labels_row_splits.Data();
K2_EVAL(
c, dest_num_arcs, lambda_set_dest_aux_labels_num,
(int32_t dest_idx012)->void {
Arc &dest_arc = dest_arcs_data[dest_idx012];
// we'll remove epsilons in dest_aux_labels
dest_aux_labels_row_splits_data[dest_idx012] =
dest_arc.label == 0 ? 0 : 1;
});
ExclusiveSum(dest_aux_labels_row_splits.Arange(0, dest_num_arcs),
&dest_aux_labels_row_splits);
RaggedShape dest_aux_labels_shape =
RaggedShape2(&dest_aux_labels_row_splits, nullptr, -1);
Array1<int32_t> dest_aux_labels_values(c,
dest_aux_labels_shape.NumElements());
int32_t *dest_aux_labels_values_data = dest_aux_labels_values.Data();
K2_EVAL(
c, dest_num_arcs, lambda_set_dest_labels_and_aux_labels,
(int32_t dest_idx012)->void {
Arc &dest_arc = dest_arcs_data[dest_idx012];
// swap label and aux_label
if (dest_arc.label != 0) {
int32_t dest_aux_labels_idx0x =
dest_aux_labels_row_splits_data[dest_idx012];
// every arc in dest has at most one aux_label (as the aux_label is
// the label of src on this arc)
dest_aux_labels_values_data[dest_aux_labels_idx0x] = dest_arc.label;
}
int32_t src_aux_labels_idx01 = labels_arc_map_data[dest_idx012];
dest_arc.label = src_aux_labels_idx01 == -1
? 0
: src_aux_labels_data[src_aux_labels_idx01];
});
*dest_aux_labels =
Ragged<int32_t>(dest_aux_labels_shape, dest_aux_labels_values);
if (arc_map != nullptr) *arc_map = src_arc_map;
}
// Will be used in InvertHost to process FsaVec input recursively.
void RecursionWrapperAuxLabels(void (*f)(FsaOrVec &, Ragged<int32_t> &,
FsaOrVec *, Ragged<int32_t> *),
FsaOrVec &src, Ragged<int32_t> &src_aux_labels,
FsaOrVec *dest,
Ragged<int32_t> *dest_aux_labels) {
NVTX_RANGE(K2_FUNC);
// src is actually an FsaVec. Just recurse for now.
K2_CHECK_EQ(src.NumAxes(), 3);
int32_t num_fsas = src.shape.Dim0();
std::vector<Fsa> srcs(num_fsas), dests(num_fsas);
std::vector<Ragged<int32_t>> src_aux_labels_vec(num_fsas),
dest_aux_labels_vec(num_fsas);
int32_t tot_num_arcs = 0;
Array1<int32_t> src_aux_labels_row_splits = src_aux_labels.RowSplits(1),
src_aux_labels_values = src_aux_labels.values;
for (int32_t i = 0; i < num_fsas; ++i) {
srcs[i] = src.Index(0, i);
int32_t cur_num_arcs = srcs[i].NumElements();
    // the block below gets aux_labels for srcs[i]
// TODO(haowen): replace with Range op for ragged
{
Array1<int32_t> row_splits = src_aux_labels_row_splits.Arange(
tot_num_arcs, tot_num_arcs + cur_num_arcs + 1);
Array1<int32_t> values =
src_aux_labels_values.Arange(row_splits[0], row_splits.Back());
row_splits = Minus(row_splits, row_splits[0]);
RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1);
src_aux_labels_vec[i] = Ragged<int32_t>(shape, values);
}
f(srcs[i], src_aux_labels_vec[i], &(dests[i]), &(dest_aux_labels_vec[i]));
tot_num_arcs += cur_num_arcs;
}
*dest = Stack(0, num_fsas, dests.data());
*dest_aux_labels = Cat(0, num_fsas, dest_aux_labels_vec.data());
}
void InvertHost(FsaOrVec &src, Ragged<int32_t> &src_aux_labels, FsaOrVec *dest,
Ragged<int32_t> *dest_aux_labels) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src_aux_labels.NumAxes(), 2);
K2_CHECK_EQ(src_aux_labels.Dim0(), src.NumElements());
K2_CHECK(dest != nullptr && dest_aux_labels != nullptr);
int32_t num_axes = src.NumAxes();
if (num_axes < 2 || num_axes > 3) {
K2_LOG(FATAL) << "Input has bad num-axes " << num_axes;
} else if (num_axes == 3) {
return RecursionWrapperAuxLabels(InvertHost, src, src_aux_labels, dest,
dest_aux_labels);
}
k2host::Fsa host_fsa = FsaToHostFsa(src);
// k2host::AuxLabels is a k2host::Array2
k2host::AuxLabels host_aux_labels(
src_aux_labels.Dim0(), src_aux_labels.NumElements(),
src_aux_labels.RowSplits(1).Data(), src_aux_labels.values.Data());
k2host::FstInverter inverter(host_fsa, host_aux_labels);
k2host::Array2Size<int32_t> fsa_size, aux_size;
inverter.GetSizes(&fsa_size, &aux_size);
FsaCreator fsa_creator(fsa_size);
k2host::Fsa host_dest_fsa = fsa_creator.GetHostFsa();
Ragged2Creator<int32_t> ragged_creator(aux_size);
k2host::AuxLabels host_dest_aux_labels = ragged_creator.GetHostArray2();
inverter.GetOutput(&host_dest_fsa, &host_dest_aux_labels);
*dest = fsa_creator.GetFsa();
*dest_aux_labels = ragged_creator.GetRagged2();
}
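// Returns a copy of `src` in which every arc whose label is 0 and whose
// src_state equals its dest_state (i.e. every epsilon self-loop) has been
// removed; all other arcs and all states are kept.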
FsaOrVec RemoveEpsilonSelfLoops(FsaOrVec &src,
Array1<int32_t> *arc_map /* = nullptr */) {
NVTX_RANGE(K2_FUNC);
if (src.NumAxes() == 2) {
FsaVec temp = FsaToFsaVec(src);
return RemoveEpsilonSelfLoops(temp, arc_map).RemoveAxis(0);
}
K2_CHECK_EQ(src.NumAxes(), 3);
ContextPtr &c = src.Context();
int32_t num_arcs = src.NumElements();
Renumbering renumber_lists(c, num_arcs);
char *keep_list_data = renumber_lists.Keep().Data();
const Arc *arcs_data = src.values.Data();
K2_EVAL(
c, num_arcs, lambda_set_keep, (int32_t i)->void {
Arc arc = arcs_data[i];
char keep;
if (arc.label == 0 && arc.src_state == arc.dest_state) {
// This arc is an epsilon self-loop, so it should be removed
keep = 0;
} else {
keep = 1;
}
keep_list_data[i] = keep;
});
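  // Subset the arcs (axis 2) according to the renumbering above; states and
  // FSAs are unchanged.  `arc_map`, if supplied, maps each kept arc to its
  // arc-index in `src`.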
FsaVec ans = Index(src, 2, renumber_lists.New2Old(), arc_map);
return ans;
}
} // namespace k2
| 015fb4fb69db385a682202dd7019944ec4ca98ef.cu | /**
* Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu)
* Mobvoi Inc. (authors: Fangjun Kuang)
*
* See LICENSE for clarification regarding multiple authors
*/
#include <algorithm>
#include <limits>
#include <memory>
#include <type_traits>
#include <utility>
#include <vector>
#include "k2/csrc/array_ops.h"
#include "k2/csrc/fsa_algo.h"
#include "k2/csrc/fsa_utils.h"
#include "k2/csrc/host/aux_labels.h"
#include "k2/csrc/host/connect.h"
#include "k2/csrc/host/determinize.h"
#include "k2/csrc/host/intersect.h"
#include "k2/csrc/host/rmepsilon.h"
#include "k2/csrc/host/topsort.h"
#include "k2/csrc/host_shim.h"
#include "k2/csrc/macros.h"
#include "k2/csrc/rm_epsilon.h"
// this contains a subset of the algorithms in fsa_algo.h; currently it just
// contains ones that are wrappings of the corresponding algorithms in
// host/.
namespace k2 {
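// Helper that applies a single-FSA algorithm `f` to each FSA of an FsaVec,
// stacks the outputs, and offsets the per-FSA arc-maps so that they index
// arcs of the original FsaVec.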
bool RecursionWrapper(bool (*f)(Fsa &, Fsa *, Array1<int32_t> *), Fsa &src,
Fsa *dest, Array1<int32_t> *arc_map) {
NVTX_RANGE(K2_FUNC);
// src is actually an FsaVec. Just recurse for now.
int32_t num_fsas = src.shape.Dim0();
std::vector<Fsa> srcs(num_fsas), dests(num_fsas);
std::vector<Array1<int32_t>> arc_maps(num_fsas);
int32_t tot_num_arcs = 0;
for (int32_t i = 0; i < num_fsas; ++i) {
srcs[i] = src.Index(0, i);
// Recurse.
if (!f(srcs[i], &(dests[i]),
(arc_map != nullptr ? &(arc_maps[i]) : nullptr)))
return false;
if (arc_map != nullptr) {
// convert arc indexes in arc_maps from idx2 to idx012
arc_maps[i] = Plus(arc_maps[i], tot_num_arcs);
tot_num_arcs += srcs[i].NumElements();
}
}
*dest = Stack(0, num_fsas, dests.data());
if (arc_map != nullptr)
*arc_map = Cat(src.Context(), num_fsas, arc_maps.data());
return true;
}
bool Connect(Fsa &src, Fsa *dest, Array1<int32_t> *arc_map /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
if (num_axes < 2 || num_axes > 3) {
K2_LOG(FATAL) << "Input has bad num-axes " << num_axes;
} else if (num_axes == 3) {
return RecursionWrapper(Connect, src, dest, arc_map);
}
k2host::Fsa host_fsa = FsaToHostFsa(src);
k2host::Connection c(host_fsa);
k2host::Array2Size<int32_t> size;
c.GetSizes(&size);
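  // The host algorithms use a two-pass API: GetSizes() reports how large the
  // output will be, FsaCreator allocates arrays of exactly that size, and
  // GetOutput() below fills them in.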
FsaCreator creator(size);
k2host::Fsa host_dest_fsa = creator.GetHostFsa();
int32_t *arc_map_data = nullptr;
if (arc_map != nullptr) {
*arc_map = Array1<int32_t>(src.Context(), size.size2);
arc_map_data = arc_map->Data();
}
bool ans = c.GetOutput(&host_dest_fsa, arc_map_data);
*dest = creator.GetFsa();
return ans;
}
bool TopSortHost(Fsa &src, Fsa *dest, Array1<int32_t> *arc_map /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
if (num_axes < 2 || num_axes > 3) {
K2_LOG(FATAL) << "Input has bad num-axes " << num_axes;
} else if (num_axes == 3) {
return RecursionWrapper(TopSortHost, src, dest, arc_map);
}
k2host::Fsa host_fsa = FsaToHostFsa(src);
k2host::TopSorter sorter(host_fsa);
k2host::Array2Size<int32_t> size;
sorter.GetSizes(&size);
FsaCreator creator(size);
k2host::Fsa host_dest_fsa = creator.GetHostFsa();
int32_t *arc_map_data = nullptr;
if (arc_map != nullptr) {
*arc_map = Array1<int32_t>(src.Context(), size.size2);
arc_map_data = arc_map->Data();
}
bool ans = sorter.GetOutput(&host_dest_fsa, arc_map_data);
*dest = creator.GetFsa();
return ans;
}
bool Intersect(FsaOrVec &a_fsas, int32_t properties_a, FsaOrVec &b_fsas,
int32_t properties_b, bool treat_epsilons_specially, FsaVec *out,
Array1<int32_t> *arc_map_a, Array1<int32_t> *arc_map_b) {
NVTX_RANGE(K2_FUNC);
K2_CHECK(a_fsas.NumAxes() >= 2 && a_fsas.NumAxes() <= 3);
K2_CHECK(b_fsas.NumAxes() >= 2 && b_fsas.NumAxes() <= 3);
ContextPtr c = a_fsas.Context();
K2_CHECK_EQ(c->GetDeviceType(), kCpu);
if (a_fsas.NumAxes() == 2) {
FsaVec a_fsas_vec = FsaToFsaVec(a_fsas);
return Intersect(a_fsas_vec, properties_a, b_fsas, properties_b,
treat_epsilons_specially, out, arc_map_a, arc_map_b);
}
if (b_fsas.NumAxes() == 2) {
FsaVec b_fsas_vec = FsaToFsaVec(b_fsas);
return Intersect(a_fsas, properties_a, b_fsas_vec, properties_b,
treat_epsilons_specially, out, arc_map_a, arc_map_b);
}
int32_t num_fsas_a = a_fsas.Dim0(), num_fsas_b = b_fsas.Dim0();
K2_CHECK_GT(num_fsas_a, 0);
K2_CHECK_GT(num_fsas_b, 0);
int32_t stride_a = 1, stride_b = 1;
if (num_fsas_a != num_fsas_b) {
if (num_fsas_a == 1) {
stride_a = 0;
} else if (num_fsas_b == 1) {
stride_b = 0;
} else {
K2_CHECK_EQ(num_fsas_a, num_fsas_b);
}
    // If we took the else branch above, num_fsas_a != num_fsas_b and neither
    // is 1, so the K2_CHECK_EQ there will fail with an error.
}
if (properties_a < 0) {
Array1<int32_t> properties_a_out(c, num_fsas_a);
GetFsaVecBasicProperties(a_fsas, &properties_a_out, &properties_a);
}
if (properties_b < 0) {
Array1<int32_t> properties_b_out(c, num_fsas_b);
GetFsaVecBasicProperties(b_fsas, &properties_b_out, &properties_b);
}
bool arc_sorted = (properties_a & kFsaPropertiesArcSorted) &&
(properties_b & kFsaPropertiesArcSorted);
K2_CHECK(arc_sorted) << "Both a_fsas and b_fsas should be arc-sorted";
int32_t num_fsas = std::max(num_fsas_a, num_fsas_b);
std::vector<std::unique_ptr<k2host::Intersection>> intersections(num_fsas);
std::vector<k2host::Array2Size<int32_t>> sizes(num_fsas);
for (int32_t i = 0; i < num_fsas; ++i) {
k2host::Fsa host_fsa_a = FsaVecToHostFsa(a_fsas, i * stride_a),
host_fsa_b = FsaVecToHostFsa(b_fsas, i * stride_b);
intersections[i] = std::make_unique<k2host::Intersection>(
host_fsa_a, host_fsa_b, treat_epsilons_specially, false);
intersections[i]->GetSizes(&(sizes[i]));
}
FsaVecCreator creator(sizes);
int32_t num_arcs = creator.NumArcs();
if (arc_map_a) *arc_map_a = Array1<int32_t>(c, num_arcs);
if (arc_map_b) *arc_map_b = Array1<int32_t>(c, num_arcs);
// the following few lines will allow us to add suitable offsets to the
// `arc_map`.
Array1<int32_t> a_fsas_row_splits12 =
a_fsas.RowSplits(2)[a_fsas.RowSplits(1)],
b_fsas_row_splits12 =
b_fsas.RowSplits(2)[b_fsas.RowSplits(1)];
const int32_t *a_fsas_row_splits12_data = a_fsas_row_splits12.Data(),
*b_fsas_row_splits12_data = b_fsas_row_splits12.Data();
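  // a_fsas_row_splits12[i] is the arc-index (idx012) of the first arc of the
  // i'th FSA; the host intersections return arc-maps relative to each FSA, so
  // these offsets are added below to make the maps index into the full
  // FsaVecs.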
bool ok = true;
for (int32_t i = 0; i < num_fsas; ++i) {
k2host::Fsa host_fsa_out = creator.GetHostFsa(i);
int32_t arc_offset = creator.GetArcOffsetFor(i);
int32_t *this_arc_map_a =
(arc_map_a ? arc_map_a->Data() + arc_offset : nullptr),
*this_arc_map_b =
(arc_map_b ? arc_map_b->Data() + arc_offset : nullptr);
bool ans = intersections[i]->GetOutput(&host_fsa_out, this_arc_map_a,
this_arc_map_b);
ok = ok && ans;
int32_t this_num_arcs = creator.GetArcOffsetFor(i + 1) - arc_offset;
if (arc_map_a) {
int32_t arc_offset_a = a_fsas_row_splits12_data[i * stride_a];
for (int32_t i = 0; i < this_num_arcs; i++)
this_arc_map_a[i] += arc_offset_a;
}
if (arc_map_b) {
int32_t arc_offset_b = b_fsas_row_splits12_data[i * stride_b];
for (int32_t i = 0; i < this_num_arcs; i++)
this_arc_map_b[i] += arc_offset_b;
}
}
*out = creator.GetFsaVec();
return ok;
}
// Will be used in RemoveEpsilonHost and Determinize below to process FsaVec
// input recursively.
void RecursionWrapper(void (*f)(FsaOrVec &, FsaOrVec *, Ragged<int32_t> *),
FsaOrVec &src, FsaOrVec *dest,
Ragged<int32_t> *arc_deriv) {
NVTX_RANGE(K2_FUNC);
// src is actually an FsaVec. Just recurse for now.
K2_CHECK_EQ(src.NumAxes(), 3);
int32_t num_fsas = src.shape.Dim0();
std::vector<Fsa> srcs(num_fsas), dests(num_fsas);
std::vector<Ragged<int32_t>> arc_derivs(num_fsas);
int32_t tot_num_arcs = 0;
for (int32_t i = 0; i < num_fsas; ++i) {
srcs[i] = src.Index(0, i);
f(srcs[i], &(dests[i]), arc_deriv != nullptr ? &(arc_derivs[i]) : nullptr);
if (arc_deriv != nullptr) {
// convert arc indexes in arc_derivs from idx2 to idx012
Array1<int32_t> &values = arc_derivs[i].values;
values = Plus(values, tot_num_arcs);
tot_num_arcs += srcs[i].NumElements();
}
}
*dest = Stack(0, num_fsas, dests.data());
if (arc_deriv != nullptr) *arc_deriv = Cat(0, num_fsas, arc_derivs.data());
}
void RemoveEpsilonHost(FsaOrVec &src, FsaOrVec *dest,
Ragged<int32_t> *arc_derivs /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
if (num_axes < 2 || num_axes > 3) {
K2_LOG(FATAL) << "Input has bad num-axes " << num_axes;
} else if (num_axes == 3) {
return RecursionWrapper(RemoveEpsilonHost, src, dest, arc_derivs);
}
k2host::Fsa host_fsa = FsaToHostFsa(src);
int32_t num_states = host_fsa.NumStates();
K2_CHECK_EQ(num_states, src.Dim0());
std::vector<double> max_forward_weights(num_states);
std::vector<double> max_backward_weights(num_states);
k2host::WfsaWithFbWeights max_wfsa(host_fsa, k2host::kMaxWeight,
max_forward_weights.data(),
max_backward_weights.data());
// pass infinity as beam since we don't do pruning here.
float beam = std::numeric_limits<float>::infinity();
k2host::EpsilonsRemoverPrunedMax eps_remover(max_wfsa, beam);
k2host::Array2Size<int32_t> fsa_size, arc_derivs_size;
eps_remover.GetSizes(&fsa_size, &arc_derivs_size);
FsaCreator fsa_creator(fsa_size);
k2host::Fsa host_dest_fsa = fsa_creator.GetHostFsa();
K2_STATIC_ASSERT(
(std::is_same<k2host::MaxTracebackState::DerivType, int32_t>::value));
Ragged2Creator<int32_t> ragged_creator(arc_derivs_size);
k2host::Array2<int32_t *, int32_t> host_arc_derivs =
ragged_creator.GetHostArray2();
eps_remover.GetOutput(&host_dest_fsa, &host_arc_derivs);
*dest = fsa_creator.GetFsa();
if (arc_derivs != nullptr) *arc_derivs = ragged_creator.GetRagged2();
}
void RemoveEpsilon(FsaOrVec &src, int32_t properties,
FsaOrVec *dest,
Ragged<int32_t> *arc_derivs) {
if ((properties & kFsaPropertiesTopSortedAndAcyclic) != 0 &&
src.Context()->GetDeviceType() == kCpu) {
// Host version of the algorithm
RemoveEpsilonHost(src, dest, arc_derivs);
} else {
RemoveEpsilonDevice(src, dest, arc_derivs);
}
}
void RemoveEpsilonAndAddSelfLoops(FsaOrVec &src, int32_t properties,
FsaOrVec *dest,
Ragged<int32_t> *arc_derivs) {
NVTX_RANGE(K2_FUNC);
Ragged<int32_t> arc_derivs1;
FsaOrVec temp;
RemoveEpsilon(src, properties, &temp,
(arc_derivs != nullptr ? &arc_derivs1 : nullptr));
Array1<int32_t> arc_derivs2;
AddEpsilonSelfLoops(temp, dest,
(arc_derivs != nullptr ? &arc_derivs2 : nullptr));
if (arc_derivs != nullptr) {
*arc_derivs = Index(arc_derivs1, 0, arc_derivs2, nullptr);
}
}
void Determinize(FsaOrVec &src, FsaOrVec *dest,
Ragged<int32_t> *arc_derivs /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
int32_t num_axes = src.NumAxes();
if (num_axes < 2 || num_axes > 3) {
K2_LOG(FATAL) << "Input has bad num-axes " << num_axes;
} else if (num_axes == 3) {
return RecursionWrapper(Determinize, src, dest, arc_derivs);
}
k2host::Fsa host_fsa = FsaToHostFsa(src);
int32_t num_states = host_fsa.NumStates();
K2_CHECK_EQ(num_states, src.Dim0());
int32_t max_step = -1; // no limit
k2host::DeterminizerMax determinizer(host_fsa, max_step);
k2host::Array2Size<int32_t> fsa_size, arc_derivs_size;
determinizer.GetSizes(&fsa_size, &arc_derivs_size);
FsaCreator fsa_creator(fsa_size);
k2host::Fsa host_dest_fsa = fsa_creator.GetHostFsa();
K2_STATIC_ASSERT(
(std::is_same<k2host::MaxTracebackState::DerivType, int32_t>::value));
Ragged2Creator<int32_t> ragged_creator(arc_derivs_size);
k2host::Array2<int32_t *, int32_t> host_arc_derivs =
ragged_creator.GetHostArray2();
determinizer.GetOutput(&host_dest_fsa, &host_arc_derivs);
*dest = fsa_creator.GetFsa();
if (arc_derivs != nullptr) *arc_derivs = ragged_creator.GetRagged2();
}
Fsa LinearFsa(const Array1<int32_t> &symbols) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = symbols.Context();
int32_t n = symbols.Dim(), num_states = n + 2, num_arcs = n + 1;
Array1<int32_t> row_splits1 = Range(c, num_states + 1, 0),
row_ids1 = Range(c, num_arcs, 0);
int32_t *row_splits1_data = row_splits1.Data();
Array1<Arc> arcs(c, num_arcs);
Arc *arcs_data = arcs.Data();
const int32_t *symbols_data = symbols.Data();
K2_EVAL(
c, num_arcs, lambda_set_arcs, (int32_t arc_idx01)->void {
int32_t src_state = arc_idx01, dest_state = arc_idx01 + 1,
// -1 == kFinalSymbol
symbol = (arc_idx01 < n ? symbols_data[arc_idx01] : -1);
if (arc_idx01 < n) K2_CHECK_NE(symbol, -1);
float score = 0.0;
arcs_data[arc_idx01] = Arc(src_state, dest_state, symbol, score);
// the final state has no leaving arcs.
if (arc_idx01 == 0) row_splits1_data[num_states] = num_arcs;
});
return Ragged<Arc>(RaggedShape2(&row_splits1, &row_ids1, num_arcs), arcs);
}
FsaVec LinearFsas(const Ragged<int32_t> &symbols) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(symbols.NumAxes(), 2);
ContextPtr &c = symbols.Context();
// if there are n symbols, there are n+2 states and n+1 arcs.
RaggedShape states_shape = ChangeSublistSize(symbols.shape, 2);
int32_t num_states = states_shape.NumElements(),
num_arcs = symbols.NumElements() + symbols.Dim0();
// row_splits2 maps from state_idx01 to arc_idx012; row_ids2 does the reverse.
// We'll set them in the lambda below.
Array1<int32_t> row_splits2(c, num_states + 1), row_ids2(c, num_arcs);
int32_t *row_ids2_data = row_ids2.Data(),
*row_splits2_data = row_splits2.Data();
const int32_t *row_ids1_data = states_shape.RowIds(1).Data(),
*row_splits1_data = states_shape.RowSplits(1).Data(),
*symbols_data = symbols.values.Data();
Array1<Arc> arcs(c, num_arcs);
Arc *arcs_data = arcs.Data();
K2_EVAL(
c, num_states, lambda, (int32_t state_idx01)->void {
int32_t fsa_idx0 = row_ids1_data[state_idx01],
state_idx0x = row_splits1_data[fsa_idx0],
next_state_idx0x = row_splits1_data[fsa_idx0 + 1],
idx1 = state_idx01 - state_idx0x;
// the following works because each FSA has one fewer arcs than states.
int32_t arc_idx0xx = state_idx0x - fsa_idx0,
next_arc_idx0xx = next_state_idx0x - (fsa_idx0 + 1),
// the following may look a bit wrong.. here, the idx1 is the
// same as the idx12 if the arc exists, because each state has
// one arc leaving it (except the last state).
arc_idx012 = arc_idx0xx + idx1;
// the following works because each FSA has one fewer symbols than arcs
// (however it doesn't work for the last arc of each FSA; we check
// below.)
int32_t symbol_idx01 = arc_idx012 - fsa_idx0;
if (arc_idx012 < next_arc_idx0xx) {
int32_t src_state = idx1, dest_state = idx1 + 1,
symbol = (arc_idx012 + 1 < next_arc_idx0xx
? symbols_data[symbol_idx01]
: -1); // kFinalSymbol
float score = 0.0;
arcs_data[arc_idx012] = Arc(src_state, dest_state, symbol, score);
row_ids2_data[arc_idx012] = state_idx01;
} else {
            // The following ensures that the last element of row_splits2_data
            // (i.e. row_splits2[num_states]) is set to num_arcs.  It also writes
// something unnecessary for the last state of each FSA but the last
// one, which will cause 2 threads to write the same item to the same
            // location.  Note that there is no arc with index `arc_idx012`, if you
// reach here.
row_splits2_data[state_idx01 + 1] = arc_idx012;
}
row_splits2_data[state_idx01] = arc_idx012;
});
return Ragged<Arc>(
RaggedShape3(&states_shape.RowSplits(1), &states_shape.RowIds(1),
num_states, &row_splits2, &row_ids2, num_arcs),
arcs);
}
void ArcSort(Fsa *fsa) {
if (fsa->NumAxes() < 2) return; // it is empty
SortSublists<Arc>(fsa);
}
void ArcSort(Fsa &src, Fsa *dest, Array1<int32_t> *arc_map /*= nullptr*/) {
NVTX_RANGE(K2_FUNC);
if (!src.values.IsValid()) return;
if (arc_map != nullptr)
*arc_map = Array1<int32_t>(src.Context(), src.NumElements());
Fsa tmp(src.shape, src.values.Clone());
SortSublists<Arc>(&tmp, arc_map);
*dest = tmp;
}
// TODO(fangjun): use the following method suggested by Dan
//
// ... incidentally, it's possible to further optimize this so the run
// time is less than linear, by using methods similar to what I use
// in GetStateBatches(); imagine computing a table that instead of
// the best traceback, is the best 2-step traceback; and then the 4-step
// traceback, and so on. There's no need for this right now, since the
// forward-pass algorithm is already at least linear-time in the length
// of this path. But we can consider it for the future.
Ragged<int32_t> ShortestPath(FsaVec &fsas,
const Array1<int32_t> &entering_arcs) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(fsas.NumAxes(), 3);
const int32_t *entering_arcs_data = entering_arcs.Data();
const Arc *arcs_data = fsas.values.Data();
int32_t num_fsas = fsas.Dim0();
int32_t num_states = fsas.TotSize(1);
ContextPtr &context = fsas.Context();
// allocate an extra element for ExclusiveSum
Array1<int32_t> num_best_arcs_per_fsa(context, num_fsas + 1);
int32_t *num_best_arcs_per_fsa_data = num_best_arcs_per_fsa.Data();
const int32_t *row_splits1_data = fsas.RowSplits(1).Data();
// -1 represents an invalid arc_index.
// This extra array avoids an extra iteration over `entering_arcs`.
Array1<int32_t> state_best_arc_index_array(context, num_states, -1);
int32_t *state_best_arc_index_array_data = state_best_arc_index_array.Data();
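  // For each FSA, trace back from its final state along `entering_arcs`,
  // writing the arc-indexes of the best path (in forward order) into
  // state_best_arc_index_array and counting how many arcs that path has.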
K2_EVAL(
context, num_fsas, lambda_set_num_best_arcs, (int32_t fsas_idx0) {
int32_t state_idx01 = row_splits1_data[fsas_idx0];
int32_t state_idx01_next = row_splits1_data[fsas_idx0 + 1];
if (state_idx01_next == state_idx01) {
// this fsa is empty, so there is no best path available
num_best_arcs_per_fsa_data[fsas_idx0] = 0;
return;
}
int32_t final_state_idx01 = state_idx01_next - 1;
int32_t cur_state = final_state_idx01;
int32_t cur_index = entering_arcs_data[cur_state];
int32_t num_arcs = 0;
int32_t *p = state_best_arc_index_array_data + final_state_idx01;
while (cur_index != -1) {
*p = cur_index;
--p;
cur_state = arcs_data[cur_index].src_state + state_idx01;
cur_index = entering_arcs_data[cur_state];
++num_arcs;
}
num_best_arcs_per_fsa_data[fsas_idx0] = num_arcs;
});
ExclusiveSum(num_best_arcs_per_fsa, &num_best_arcs_per_fsa);
RaggedShape shape = RaggedShape2(&num_best_arcs_per_fsa, nullptr, -1);
const int32_t *shape_row_splits1_data = shape.RowSplits(1).Data();
const int32_t *shape_row_ids1_data = shape.RowIds(1).Data();
const int32_t *ans_row_splits_data = shape.RowSplits(1).Data();
Array1<int32_t> best_path_arc_indexes(context, shape.NumElements());
int32_t *best_path_arc_indexes_data = best_path_arc_indexes.Data();
K2_EVAL(
context, shape.NumElements(), lambda_set_best_arcs, (int32_t ans_idx01) {
int32_t fsa_idx0 = shape_row_ids1_data[ans_idx01];
int32_t ans_idx0x = shape_row_splits1_data[fsa_idx0];
int32_t ans_idx1 = ans_idx01 - ans_idx0x;
int32_t num_arcs_this_fsa = num_best_arcs_per_fsa_data[fsa_idx0 + 1] -
num_best_arcs_per_fsa_data[fsa_idx0];
if (num_arcs_this_fsa == 0) return;
int32_t final_state_idx01_this_fsa = row_splits1_data[fsa_idx0 + 1] - 1;
const int32_t *p_start = state_best_arc_index_array_data +
final_state_idx01_this_fsa -
num_arcs_this_fsa + 1;
best_path_arc_indexes_data[ans_idx01] = p_start[ans_idx1];
});
Ragged<int32_t> ans(shape, best_path_arc_indexes);
return ans;
}
void AddEpsilonSelfLoops(FsaOrVec &src, FsaOrVec *dest,
Array1<int32_t> *arc_map /*= nullptr*/) {
NVTX_RANGE(K2_FUNC);
ContextPtr &c = src.Context();
const int32_t *old_row_splits1_data = src.RowSplits(1).Data(),
*old_row_ids1_data = src.RowIds(1).Data();
const Arc *old_arcs_data = src.values.Data();
if (src.NumAxes() == 2) {
int32_t num_states = src.Dim0();
if (num_states < 2) {
K2_CHECK_EQ(num_states, 0);
*dest = src;
if (arc_map != nullptr) *arc_map = Array1<int32_t>(c, 0);
return;
}
int32_t old_num_arcs = src.TotSize(1),
new_num_arcs = old_num_arcs + (num_states - 1);
Array1<int32_t> new_row_splits(c, num_states + 1),
new_row_ids(c, new_num_arcs);
Array1<Arc> new_arcs(c, new_num_arcs);
int32_t *new_row_splits1_data = new_row_splits.Data(),
*new_row_ids1_data = new_row_ids.Data();
Arc *new_arcs_data = new_arcs.Data();
int32_t *arc_map_data = nullptr;
if (arc_map) {
*arc_map = Array1<int32_t>(c, new_num_arcs);
arc_map_data = arc_map->Data();
}
ParallelRunner pr(c);
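    // The two kernels below can run on separate streams: one copies the
    // original arcs into their shifted positions, the other writes the new
    // self-loop arcs and the row-splits.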
{
With w(pr.NewStream());
K2_EVAL(
c, old_num_arcs, lambda_copy_data, (int32_t arc_idx01)->void {
int32_t state_idx0 = old_row_ids1_data[arc_idx01],
new_arc_idx01 = arc_idx01 + 1 + state_idx0;
// the "+1" above is because we put the self-loop first.
new_row_ids1_data[new_arc_idx01] = state_idx0;
new_arcs_data[new_arc_idx01] = old_arcs_data[arc_idx01];
if (arc_map_data) arc_map_data[new_arc_idx01] = arc_idx01;
});
}
{
With w(pr.NewStream());
K2_EVAL(
c, num_states, lambda_set_new_data, (int32_t state_idx0)->void {
int32_t old_arc_idx0x = old_row_splits1_data[state_idx0],
new_arc_idx0x = old_arc_idx0x + state_idx0;
new_row_splits1_data[state_idx0] = new_arc_idx0x;
if (state_idx0 + 1 < num_states) { // not final-state
int32_t new_arc_idx01 = new_arc_idx0x; // the 1st arc is the loop
new_row_ids1_data[new_arc_idx01] = state_idx0;
new_arcs_data[new_arc_idx01] =
Arc(state_idx0, state_idx0, 0, 0.0);
if (arc_map_data) arc_map_data[new_arc_idx01] = -1;
} else {
// Note: if num_states was zero we would have returned above, so
// we don't have to worry about empty FSAs.
new_row_splits1_data[num_states] = new_arc_idx0x;
}
});
}
pr.Finish();
*dest = Ragged<Arc>(
RaggedShape2(&new_row_splits, &new_row_ids, new_num_arcs), new_arcs);
} else {
K2_CHECK_EQ(src.NumAxes(), 3);
// Get a vector saying, for each FSA, whether it's nonempty.
int32_t num_fsas = src.Dim0(), num_states = src.TotSize(1),
old_num_arcs = src.TotSize(2);
if (num_states == 0) {
*dest = src;
if (arc_map) *arc_map = Array1<int32_t>(c, 0);
return;
}
Array1<int32_t> fsa_nonempty(c, num_fsas + 1);
int32_t *fsa_nonempty_data = fsa_nonempty.Data();
K2_EVAL(
c, num_fsas, lambda_set_fsa_nonempty, (int32_t fsa_idx0)->void {
fsa_nonempty_data[fsa_idx0] = (old_row_splits1_data[fsa_idx0 + 1] >
old_row_splits1_data[fsa_idx0]);
});
ExclusiveSum(fsa_nonempty, &fsa_nonempty);
const int32_t *old_row_splits2_data = src.RowSplits(2).Data(),
*old_row_ids2_data = src.RowIds(2).Data();
int32_t num_nonempty_fsas = fsa_nonempty.Back(),
new_num_arcs = old_num_arcs + num_states - num_nonempty_fsas;
// we subtract `num_nonempty_fsas` because final-states don't get a
// self-loop.
Array1<int32_t> new_row_splits2(c, num_states + 1),
new_row_ids2(c, new_num_arcs);
Array1<Arc> new_arcs(c, new_num_arcs);
// fsa_idx0_mod_data maps from fsa_idx0 to a modified fsa_idx0 that
// "doesn't count" FSAs with zero states.
const int32_t *fsa_idx0_mod_data = fsa_nonempty_data;
int32_t *new_row_splits2_data = new_row_splits2.Data(),
*new_row_ids2_data = new_row_ids2.Data();
Arc *new_arcs_data = new_arcs.Data();
int32_t *arc_map_data = nullptr;
if (arc_map) {
*arc_map = Array1<int32_t>(c, new_num_arcs);
arc_map_data = arc_map->Data();
}
ParallelRunner pr(c);
{
With w(pr.NewStream());
K2_EVAL(
c, old_num_arcs, lambda_copy_data, (int32_t arc_idx012)->void {
int32_t state_idx01 = old_row_ids2_data[arc_idx012],
fsa_idx0 = old_row_ids1_data[state_idx01],
fsa_idx0_mod = fsa_idx0_mod_data[fsa_idx0],
new_arc_idx012 =
arc_idx012 + 1 + state_idx01 - fsa_idx0_mod;
// The "+1" above is because we put the self-loop first. The
// "-fsa_idx0_mod" is because final-states don't get a self-loop.
new_row_ids2_data[new_arc_idx012] = state_idx01;
new_arcs_data[new_arc_idx012] = old_arcs_data[arc_idx012];
if (arc_map_data) arc_map_data[new_arc_idx012] = arc_idx012;
});
}
{
With w(pr.NewStream());
K2_EVAL(
c, num_states, lambda_set_new_data, (int32_t state_idx01)->void {
int32_t fsa_idx0 = old_row_ids1_data[state_idx01],
fsa_idx0_mod = fsa_idx0_mod_data[fsa_idx0],
state_idx0x = old_row_splits1_data[fsa_idx0],
next_state_idx0x = old_row_splits1_data[fsa_idx0 + 1],
old_arc_idx01x = old_row_splits2_data[state_idx01];
// Below the "+ state_idx01" is because each state gets a self-loop,
// and the "- fsa_idx0_mod" is because final-states don't get a
// self-loop.
int32_t new_arc_idx01x =
old_arc_idx01x + state_idx01 - fsa_idx0_mod;
// The self-loop arc is the first arc:
int32_t new_arc_idx012 = new_arc_idx01x;
new_row_splits2_data[state_idx01] = new_arc_idx01x;
if (state_idx01 + 1 < next_state_idx0x) { // not final-state
new_row_ids2_data[new_arc_idx012] = state_idx01;
int32_t state_idx1 = state_idx01 - state_idx0x;
new_arcs_data[new_arc_idx012] =
Arc(state_idx1, state_idx1, 0, 0.0);
if (arc_map_data) arc_map_data[new_arc_idx012] = -1;
} else if (state_idx01 + 1 == num_states) {
// Note: if num_states was zero we would have returned above, so
                  // we don't have to worry about an empty FsaVec.
new_row_splits2_data[num_states] = new_arc_idx01x;
}
});
}
pr.Finish();
*dest =
Ragged<Arc>(RaggedShape3(&src.RowSplits(1), &src.RowIds(1), num_states,
&new_row_splits2, &new_row_ids2, new_num_arcs),
new_arcs);
}
}
Fsa Union(FsaVec &fsas, Array1<int32_t> *arc_map /*= nullptr*/) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(fsas.NumAxes(), 3);
ContextPtr &context = fsas.Context();
const int32_t *fsas_row_splits1_data = fsas.RowSplits(1).Data();
const int32_t *fsas_row_splits2_data = fsas.RowSplits(2).Data();
const int32_t *fsas_row_ids1_data = fsas.RowIds(1).Data();
const int32_t *fsas_row_ids2_data = fsas.RowIds(2).Data();
const Arc *arcs_data = fsas.values.Data();
int32_t num_fsas = fsas.Dim0();
int32_t num_states = fsas.TotSize(1);
int32_t num_arcs = fsas.TotSize(2);
// A new start state and a new final state are added (+2).
// The final state of each fsa is removed (-num_fsas)
int32_t num_out_states = num_states + 2 - num_fsas;
int32_t out_final_state = num_out_states - 1;
// For every fsa, a new arc is added from the new start state
// to its original start state (+num_fsas)
int32_t num_out_arcs = num_arcs + num_fsas;
Array1<int32_t> out_row_ids(context, num_out_arcs);
Array1<Arc> out_arcs(context, num_out_arcs);
Array1<int32_t> tmp_arc_map(context, num_out_arcs, -1);
int32_t *tmp_arc_map_data = tmp_arc_map.Data();
int32_t *out_row_ids_data = out_row_ids.Data();
Arc *out_arcs_data = out_arcs.Data();
K2_EVAL(
context, num_arcs, lambda_set_out, (int32_t fsas_arc_idx012) {
int32_t fsas_state_idx01 = fsas_row_ids2_data[fsas_arc_idx012];
int32_t fsas_idx0 = fsas_row_ids1_data[fsas_state_idx01];
int32_t this_fsa_final_state_idx01 =
fsas_row_splits1_data[fsas_idx0 + 1] - 1;
K2_DCHECK_GT(this_fsa_final_state_idx01, fsas_state_idx01)
<< "We support only FSAs with at least two states at present";
int32_t fsas_state_idx0x = fsas_row_splits1_data[fsas_idx0];
int32_t fsas_state_idx1 = fsas_state_idx01 - fsas_state_idx0x;
int32_t this_fsa_final_state_idx1 =
this_fsa_final_state_idx01 - fsas_state_idx0x;
int32_t fsas_arc_idx0xx = fsas_row_splits2_data[fsas_state_idx0x];
// fsa0: +1 (a new start state)
// fsa1: +0 (the final state of fsa0 is removed)
// fsa2: -1 (the final state of fsa1 is removed)
// fsa3: -2 (the final state of fsa2 is removed)
int32_t state_offset = 1 - fsas_idx0;
int32_t out_state_idx0 = fsas_state_idx01 + state_offset;
int32_t out_arc_idx01 = fsas_arc_idx012 + num_fsas;
out_row_ids_data[out_arc_idx01] = out_state_idx0;
Arc arc = arcs_data[fsas_arc_idx012];
K2_DCHECK_EQ(arc.src_state, fsas_state_idx1);
if (arc.dest_state == this_fsa_final_state_idx1)
arc.dest_state = out_final_state;
else
arc.dest_state = arc.dest_state - arc.src_state + out_state_idx0;
arc.src_state = out_state_idx0;
out_arcs_data[out_arc_idx01] = arc;
tmp_arc_map_data[out_arc_idx01] = fsas_arc_idx012;
if (fsas_arc_idx0xx == fsas_arc_idx012) {
// add a new arc from the new start state to the start state
// of this fsa
//
// WARNING: we cannot use fsas_state_idx01 here
// since the start state may have no leaving arcs!
Arc arc(0, fsas_state_idx0x + state_offset, 0, 0);
out_arcs_data[fsas_idx0] = arc;
out_row_ids_data[fsas_idx0] = 0;
}
});
if (arc_map != nullptr) *arc_map = std::move(tmp_arc_map);
Array1<int32_t> out_row_splits(context, num_out_states + 1);
RowIdsToRowSplits(out_row_ids, &out_row_splits);
RaggedShape shape = RaggedShape2(&out_row_splits, &out_row_ids, num_out_arcs);
Fsa ans = Ragged<Arc>(shape, out_arcs);
return ans;
}
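// Kleene closure of a single FSA: arcs that entered the final state are
// redirected back to the start state with label 0, and one new arc with
// label -1 from the start state to the final state is added so the empty
// path is also accepted.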
Fsa Closure(Fsa &fsa, Array1<int32_t> *arc_map /* = nullptr*/) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(fsa.NumAxes(), 2) << "We support only a single FSA.";
ContextPtr &c = fsa.Context();
int32_t num_states = fsa.Dim0();
if (num_states < 2) {
K2_CHECK_EQ(num_states, 0)
<< "An empty fsa should contain no states at all";
if (arc_map != nullptr) *arc_map = Array1<int32_t>(c, 0);
return fsa; // return itself if the input fsa is empty
}
const int32_t *fsa_row_splits_data = fsa.RowSplits(1).Data();
const int32_t *fsa_row_ids_data = fsa.RowIds(1).Data();
const Arc *fsa_arcs_data = fsa.values.Data();
int32_t fsa_final_state = num_states - 1;
int32_t num_out_states = num_states;
// An arc from the start state to the final state with label == -1 is added.
int32_t num_out_arcs = fsa.values.Dim() + 1;
Array1<int32_t> out_row_ids(c, num_out_arcs);
int32_t *out_row_ids_data = out_row_ids.Data();
Array1<Arc> out_arcs(c, num_out_arcs);
Arc *out_arcs_data = out_arcs.Data();
Array1<int32_t> tmp_arc_map(c, num_out_arcs);
int32_t *tmp_arc_map_data = tmp_arc_map.Data();
K2_EVAL(
c, fsa.values.Dim(), lambda_set_arcs, (int32_t fsa_arc_idx01) {
int32_t fsa_state_idx0 = fsa_row_ids_data[fsa_arc_idx01];
int32_t fsa_arc_idx0x = fsa_row_splits_data[fsa_state_idx0];
int32_t fsa_arc_idx1 = fsa_arc_idx01 - fsa_arc_idx0x;
int32_t this_state_num_arcs =
fsa_row_splits_data[fsa_state_idx0 + 1] - fsa_arc_idx0x;
Arc arc = fsa_arcs_data[fsa_arc_idx01];
if (arc.dest_state == fsa_final_state) {
// modify arcs entering the final state such that:
// - dest_state == 0
// - label == 0
arc.dest_state = 0;
K2_DCHECK_EQ(arc.label, -1);
arc.label = 0;
}
int out_arc_idx01;
if (arc.src_state > 0) {
          // this arc does not originate from the start state, so its index is
          // incremented
out_arc_idx01 = fsa_arc_idx01 + 1;
} else {
out_arc_idx01 = fsa_arc_idx01;
if (fsa_arc_idx1 == this_state_num_arcs - 1) {
// This is the last arc of the original start state,
// so we add a new arc just after it.
Arc new_arc(0, fsa_final_state, -1, 0.0f);
out_arcs_data[out_arc_idx01 + 1] = new_arc;
out_row_ids_data[out_arc_idx01 + 1] = 0;
tmp_arc_map_data[out_arc_idx01 + 1] = -1;
}
}
// it may happen that the start state has no leaving arcs
if (fsa_row_splits_data[1] == 0) {
Arc new_arc(0, fsa_final_state, -1, 0.0f);
out_arcs_data[0] = new_arc;
out_row_ids_data[0] = 0;
tmp_arc_map_data[0] = -1;
}
tmp_arc_map_data[out_arc_idx01] = fsa_arc_idx01;
out_arcs_data[out_arc_idx01] = arc;
out_row_ids_data[out_arc_idx01] = arc.src_state;
});
if (arc_map != nullptr) *arc_map = std::move(tmp_arc_map);
Array1<int32_t> out_row_splits(c, num_out_states + 1);
int32_t *out_row_splits_data = out_row_splits.Data();
K2_EVAL(
c, out_row_splits.Dim(), lambda_set_row_splits, (int32_t i) {
if (i == 0)
out_row_splits_data[i] = 0;
else
out_row_splits_data[i] = fsa_row_splits_data[i] + 1;
});
RaggedShape shape = RaggedShape2(&out_row_splits, &out_row_ids, num_out_arcs);
Fsa ans = Ragged<Arc>(shape, out_arcs);
return ans;
}
FsaOrVec ExpandArcs(FsaOrVec &fsas, RaggedShape &labels_shape,
Array1<int32_t> *fsas_arc_map /*=nullptr*/,
Array1<int32_t> *labels_arc_map /*=nullptr*/) {
NVTX_RANGE(K2_FUNC);
if (fsas.NumAxes() == 2) {
FsaVec fsas_temp = FsaToFsaVec(fsas);
return ExpandArcs(fsas_temp, labels_shape, fsas_arc_map, labels_arc_map)
.RemoveAxis(0);
}
K2_CHECK_EQ(fsas.NumAxes(), 3);
K2_CHECK_EQ(labels_shape.NumAxes(), 2);
K2_CHECK_EQ(fsas.NumElements(), labels_shape.Dim0());
ContextPtr &c = fsas.Context();
K2_CHECK(c->IsCompatible(*labels_shape.Context()));
RaggedShape state_to_arcs = GetLayer(fsas.shape, 1);
// `state_to_foo` is a RaggedShape that, for each state in `fsas`, has a list
// of length `num_arcs + 1`, where `num_arcs` is the number of arcs leaving
// this state in `fsas`. Interpret this as: one element for the state
// itself, then one for each arc leaving it. This `foo` is an index that
// corresponds to num-arcs plus one, but because it is really a placeholder
// and we want to keep it distinct from other things, we call it `foo`.
RaggedShape state_to_foo = ChangeSublistSize(state_to_arcs, 1);
int32_t foo_size = state_to_foo.NumElements();
// For each element of `state_to_foo`, `num_ostates_for` says how many states
// there will be for this (state,foo) in the returned (output) FSA. Here, the
// idx0 is the state, the idx1 is foo. If idx1 == 0 (interpret this as "the
// state itself"), then `num_ostates_for[idx01] = 1`, meaning "keep the
// original state". Otherwise, idx1 - 1 represents an arc_idx2 [into `fsas`],
// and we set `num_ostates_for[idx01] = max(0, seq_len-1)`, where seq_len is
// the length of the sequence in `labels_shape` corresponding to this
// arc-index.
Array1<int32_t> num_ostates_for(c, foo_size + 1);
int32_t *num_ostates_for_data = num_ostates_for.Data();
const int32_t *labels_row_splits1_data = labels_shape.RowSplits(1).Data(),
*fsas_row_splits2_data = fsas.RowSplits(2).Data(),
*state_to_foo_row_splits1_data =
state_to_foo.RowSplits(1).Data(),
*state_to_foo_row_ids1_data = state_to_foo.RowIds(1).Data();
K2_EVAL(
c, foo_size, lambda_set_num_ostates, (int32_t idx01)->void {
// note: the idx01, idx0, idx0x are into `state_to_foo`.
// This idx0 is a state-index into `fsas` (an idx01 w.r.t. `fsas`).
int32_t idx0 = state_to_foo_row_ids1_data[idx01],
idx0x = state_to_foo_row_splits1_data[idx0],
idx1 = idx01 - idx0x; // idx1 is `foo`.
int32_t num_ostates;
if (idx1 == 0) {
num_ostates = 1; // this is a copy of the original state.
} else {
int32_t fsas_arc_idx2 = idx1 - 1, fsas_state_idx01 = idx0,
fsas_arc_idx01x = fsas_row_splits2_data[fsas_state_idx01],
fsas_arc_idx012 = fsas_arc_idx01x + fsas_arc_idx2,
labels_shape_idx0 = fsas_arc_idx012,
labels_shape_idx0x =
labels_row_splits1_data[labels_shape_idx0],
labels_shape_idx0x_next =
labels_row_splits1_data[labels_shape_idx0 + 1],
labels_shape_len1 =
labels_shape_idx0x_next - labels_shape_idx0x;
// A sequence of n symbols will require n-1 extra states to represent
// it.
num_ostates = max(labels_shape_len1 - 1, (int32_t)0);
}
num_ostates_for_data[idx01] = num_ostates;
});
ExclusiveSum(num_ostates_for, &num_ostates_for);
Array1<int32_t> &foo_to_ostates_row_splits = num_ostates_for;
RaggedShape foo_to_ostates =
RaggedShape2(&foo_to_ostates_row_splits, nullptr, -1);
// to_ostates_shape has 4 axes: [fsa_id][orig_state][foo][ostate]
// where foo is a general-purpose index that ranges over the (num_arcs + 1) of
// the original state.
RaggedShape to_ostates_shape = ComposeRaggedShapes3(
GetLayer(fsas.shape, 0), state_to_foo, foo_to_ostates);
// Below, `tos` means `to_ostates_shape`.
const int32_t *tos_row_splits1_data = to_ostates_shape.RowSplits(1).Data(),
*tos_row_ids1_data = to_ostates_shape.RowIds(1).Data(),
*tos_row_splits2_data = to_ostates_shape.RowSplits(2).Data(),
*tos_row_ids2_data = to_ostates_shape.RowIds(2).Data(),
*tos_row_splits3_data = to_ostates_shape.RowSplits(3).Data(),
*tos_row_ids3_data = to_ostates_shape.RowIds(3).Data();
// `num_oarcs` gives the number of arcs in the returned (output) FSA for each
// `ostate` (i.e. leaving each state in the returned FSA).
int32_t tot_ostates = to_ostates_shape.NumElements();
Array1<int32_t> num_oarcs(c, tot_ostates + 1);
int32_t *num_oarcs_data = num_oarcs.Data();
K2_EVAL(
c, tot_ostates, lambda_set_num_oarcs, (int32_t idx0123)->void {
// All these indexes are into `to_ostates_shape`, indexed
// `[fsa][state][foo][ostate].`
int32_t idx012 = tos_row_ids3_data[idx0123],
idx012x = tos_row_splits3_data[idx012],
idx01 = tos_row_ids2_data[idx012],
idx01x = tos_row_splits2_data[idx01],
idx01x_next = tos_row_splits2_data[idx01 + 1],
len2 = idx01x_next - idx01x, idx2 = idx012 - idx01x,
idx3 = idx0123 - idx012x;
int32_t num_arcs;
if (idx2 == 0) {
K2_CHECK_EQ(idx3, 0);
// This ostate corresponds to the original state; it is not one of the
// extra states added to support chains of arcs.
// The original state had `orig_num_arcs` leaving it, which is the
// number of `foo` indexes minus one.
int32_t orig_num_arcs = len2 - 1;
num_arcs = orig_num_arcs;
} else {
// All newly-created states have exactly one arc leaving them.
num_arcs = 1;
}
num_oarcs_data[idx0123] = num_arcs;
});
ExclusiveSum(num_oarcs, &num_oarcs);
Array1<int32_t> &ostate_to_oarcs_row_splits = num_oarcs;
RaggedShape ostate_to_oarcs =
RaggedShape2(&ostate_to_oarcs_row_splits, nullptr, -1);
// `full_shape` has 5 axes: [fsa][orig_state][foo][ostate][oarc]
RaggedShape full_shape =
ComposeRaggedShapes(to_ostates_shape, ostate_to_oarcs);
// for the lower-order row-splits and row-ids, use tot_row_{splits,idx}n_data
const int32_t *full_row_splits4_data = full_shape.RowSplits(4).Data(),
*full_row_ids4_data = full_shape.RowIds(4).Data();
int32_t tot_oarcs = full_shape.NumElements();
K2_CHECK_GE(tot_oarcs, fsas.NumElements());
int32_t *fsas_arc_map_data = nullptr, *labels_arc_map_data = nullptr;
if (fsas_arc_map) {
*fsas_arc_map = Array1<int32_t>(c, tot_oarcs);
fsas_arc_map_data = fsas_arc_map->Data();
}
if (labels_arc_map) {
*labels_arc_map = Array1<int32_t>(c, tot_oarcs);
labels_arc_map_data = labels_arc_map->Data();
}
Array1<Arc> oarcs(c, tot_oarcs);
Arc *oarcs_data = oarcs.Data();
const Arc *arcs_data = fsas.values.Data();
K2_EVAL(
c, tot_oarcs, lambda_set_arcs, (int32_t idx01234)->void {
// All these indexes are into `full_shape`, indexed
// `[fsa][state][foo][ostate][oarc].`
int32_t idx0123 = full_row_ids4_data[idx01234],
idx0123x = full_row_splits4_data[idx0123],
idx4 = idx01234 - idx0123x, idx012 = tos_row_ids3_data[idx0123],
idx012x = tos_row_splits3_data[idx012],
idx3 = idx0123 - idx012x, idx01 = tos_row_ids2_data[idx012],
idx01x = tos_row_splits2_data[idx01], idx2 = idx012 - idx01x,
idx0 = tos_row_ids1_data[idx01],
idx0x = tos_row_splits1_data[idx0],
idx0xxx = tos_row_splits3_data[tos_row_splits2_data[idx0x]];
int32_t fsa_idx01x = fsas_row_splits2_data[idx01];
int32_t fsa_idx2; // the idx2 (arc-index) into `fsas` of the input arc
// that's most relevant to us..
int32_t seq_pos; // seq_pos is our index into the sequence of arcs that
// we produce for each original arc
if (idx2 == 0) {
K2_CHECK_EQ(idx3, 0);
fsa_idx2 = idx4; // corresponds to foo=0, so idx3 will be 0; the idx4
// enumerates the arcs leaving it..
seq_pos = 0;
} else {
// this is one of the extra `foo` indexes, one per arc in the input
// FSA that leaves this state; each of those `foo` indexes has
// (seq_len - 1) states in it (idx3=0,1..seq_len-1); and each state
// has one arc leaving it (idx4==0).
K2_CHECK_EQ(idx4, 0);
fsa_idx2 = idx2 - 1;
seq_pos = idx3 + 1;
}
int32_t fsa_idx012 = fsa_idx01x + fsa_idx2; // index of the arc in
                                                     // the source FSA that
// we're expanding..
Arc iarc = arcs_data[fsa_idx012];
int32_t labels_idx0x = labels_row_splits1_data[fsa_idx012],
labels_next_idx0x = labels_row_splits1_data[fsa_idx012 + 1],
labels_len1 = labels_next_idx0x - labels_idx0x;
// labels_len1 is length of label sequence for this arc
K2_CHECK_LT(seq_pos, max(int32_t(1), labels_len1));
int32_t dest_idx01 = idx0x + iarc.dest_state, // original destination
// state-index
orig_dest_idx0123 =
tos_row_splits3_data[tos_row_splits2_data[dest_idx01]];
Arc oarc;
oarc.src_state = idx0123 - idx0xxx;
// If this is the last arc in the sequence, the dest-state is the
// original dest-state of the arc. Otherwise the dest-state is one of
// the new states that we created. The idx123 will be an idx1 after
// removing axes.
int32_t dest_idx123;
if (seq_pos + 1 >= labels_len1) { // last arc in sequence..
dest_idx123 = orig_dest_idx0123 - idx0xxx;
} else {
int32_t dest_state_idx2 = fsa_idx2 + 1, // index `foo` equals
// orig_arc_idx+1
dest_state_idx3 = seq_pos, // ostate index..
dest_idx012 = idx01x + dest_state_idx2,
dest_idx012x = tos_row_splits3_data[dest_idx012],
dest_idx0123 = dest_idx012x + dest_state_idx3;
dest_idx123 = dest_idx0123 - idx0xxx;
}
oarc.dest_state = dest_idx123; // indexes 1,2,3 will be combined; in
// the output FSA it will be an idx1.
if (fsas_arc_map_data)
fsas_arc_map_data[idx01234] = (seq_pos == 0 ? fsa_idx012 : -1);
if (labels_arc_map_data)
labels_arc_map_data[idx01234] =
(seq_pos < labels_len1 ? labels_idx0x + seq_pos : -1);
if (iarc.label != -1) {
// normal case.. label goes on 1st arc in sequence
oarc.label = (seq_pos == 0 ? iarc.label : 0);
} else {
// If the arc was to the final-state, we need to keep the label on the
// last arc of the sequence to keep the output valid. The following
// would be "seq_pos + 1 == labels_len1 ? -1 : 0", but we make it ">="
            // not "==" to account for the case seq_pos=0, labels_len1 = 0.
oarc.label = (seq_pos + 1 >= labels_len1 ? -1 : 0);
}
oarc.score = (seq_pos == 0 ? iarc.score : 0.0);
oarcs_data[idx01234] = oarc;
});
// remove current axes 1 and 2... [after removing axis 1, old axis 2 becomes
// axis 1, so remove axis 1 twice].
RaggedShape temp = RemoveAxis(full_shape, 1);
return FsaVec(RemoveAxis(temp, 1), oarcs);
}
void Invert(FsaOrVec &src, Ragged<int32_t> &src_aux_labels, FsaOrVec *dest,
Ragged<int32_t> *dest_aux_labels,
Array1<int32_t> *arc_map /*= nullptr*/) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src_aux_labels.NumAxes(), 2);
K2_CHECK_EQ(src_aux_labels.Dim0(), src.NumElements());
K2_CHECK(dest != nullptr && dest_aux_labels != nullptr);
ContextPtr c = GetContext(src, src_aux_labels);
if (src.NumAxes() == 2) {
Fsa *srcs = &src;
FsaVec src_vec = CreateFsaVec(1, &srcs), dest_vec;
Invert(src_vec, src_aux_labels, &dest_vec, dest_aux_labels, arc_map);
*dest = GetFsaVecElement(dest_vec, 0);
return;
}
Array1<int32_t> src_arc_map, labels_arc_map;
*dest = ExpandArcs(src, src_aux_labels.shape, &src_arc_map, &labels_arc_map);
// swap labels and aux_labels
int32_t dest_num_arcs = dest->NumElements();
Arc *dest_arcs_data = dest->values.Data();
const int32_t *labels_arc_map_data = labels_arc_map.Data(),
*src_aux_labels_data = src_aux_labels.values.Data();
Array1<int32_t> dest_aux_labels_row_splits(c, dest_num_arcs + 1);
int32_t *dest_aux_labels_row_splits_data = dest_aux_labels_row_splits.Data();
K2_EVAL(
c, dest_num_arcs, lambda_set_dest_aux_labels_num,
(int32_t dest_idx012)->void {
Arc &dest_arc = dest_arcs_data[dest_idx012];
// we'll remove epsilons in dest_aux_labels
dest_aux_labels_row_splits_data[dest_idx012] =
dest_arc.label == 0 ? 0 : 1;
});
ExclusiveSum(dest_aux_labels_row_splits.Arange(0, dest_num_arcs),
&dest_aux_labels_row_splits);
RaggedShape dest_aux_labels_shape =
RaggedShape2(&dest_aux_labels_row_splits, nullptr, -1);
Array1<int32_t> dest_aux_labels_values(c,
dest_aux_labels_shape.NumElements());
int32_t *dest_aux_labels_values_data = dest_aux_labels_values.Data();
K2_EVAL(
c, dest_num_arcs, lambda_set_dest_labels_and_aux_labels,
(int32_t dest_idx012)->void {
Arc &dest_arc = dest_arcs_data[dest_idx012];
// swap label and aux_label
if (dest_arc.label != 0) {
int32_t dest_aux_labels_idx0x =
dest_aux_labels_row_splits_data[dest_idx012];
// every arc in dest has at most one aux_label (as the aux_label is
// the label of src on this arc)
dest_aux_labels_values_data[dest_aux_labels_idx0x] = dest_arc.label;
}
int32_t src_aux_labels_idx01 = labels_arc_map_data[dest_idx012];
dest_arc.label = src_aux_labels_idx01 == -1
? 0
: src_aux_labels_data[src_aux_labels_idx01];
});
*dest_aux_labels =
Ragged<int32_t>(dest_aux_labels_shape, dest_aux_labels_values);
if (arc_map != nullptr) *arc_map = src_arc_map;
}
// Will be used in InvertHost to process FsaVec input recursively.
void RecursionWrapperAuxLabels(void (*f)(FsaOrVec &, Ragged<int32_t> &,
FsaOrVec *, Ragged<int32_t> *),
FsaOrVec &src, Ragged<int32_t> &src_aux_labels,
FsaOrVec *dest,
Ragged<int32_t> *dest_aux_labels) {
NVTX_RANGE(K2_FUNC);
// src is actually an FsaVec. Just recurse for now.
K2_CHECK_EQ(src.NumAxes(), 3);
int32_t num_fsas = src.shape.Dim0();
std::vector<Fsa> srcs(num_fsas), dests(num_fsas);
std::vector<Ragged<int32_t>> src_aux_labels_vec(num_fsas),
dest_aux_labels_vec(num_fsas);
int32_t tot_num_arcs = 0;
Array1<int32_t> src_aux_labels_row_splits = src_aux_labels.RowSplits(1),
src_aux_labels_values = src_aux_labels.values;
for (int32_t i = 0; i < num_fsas; ++i) {
srcs[i] = src.Index(0, i);
int32_t cur_num_arcs = srcs[i].NumElements();
    // the block below gets aux_labels for srcs[i]
// TODO(haowen): replace with Range op for ragged
{
Array1<int32_t> row_splits = src_aux_labels_row_splits.Arange(
tot_num_arcs, tot_num_arcs + cur_num_arcs + 1);
Array1<int32_t> values =
src_aux_labels_values.Arange(row_splits[0], row_splits.Back());
row_splits = Minus(row_splits, row_splits[0]);
RaggedShape shape = RaggedShape2(&row_splits, nullptr, -1);
src_aux_labels_vec[i] = Ragged<int32_t>(shape, values);
}
f(srcs[i], src_aux_labels_vec[i], &(dests[i]), &(dest_aux_labels_vec[i]));
tot_num_arcs += cur_num_arcs;
}
*dest = Stack(0, num_fsas, dests.data());
*dest_aux_labels = Cat(0, num_fsas, dest_aux_labels_vec.data());
}
void InvertHost(FsaOrVec &src, Ragged<int32_t> &src_aux_labels, FsaOrVec *dest,
Ragged<int32_t> *dest_aux_labels) {
NVTX_RANGE(K2_FUNC);
K2_CHECK_EQ(src_aux_labels.NumAxes(), 2);
K2_CHECK_EQ(src_aux_labels.Dim0(), src.NumElements());
K2_CHECK(dest != nullptr && dest_aux_labels != nullptr);
int32_t num_axes = src.NumAxes();
if (num_axes < 2 || num_axes > 3) {
K2_LOG(FATAL) << "Input has bad num-axes " << num_axes;
} else if (num_axes == 3) {
return RecursionWrapperAuxLabels(InvertHost, src, src_aux_labels, dest,
dest_aux_labels);
}
k2host::Fsa host_fsa = FsaToHostFsa(src);
// k2host::AuxLabels is a k2host::Array2
k2host::AuxLabels host_aux_labels(
src_aux_labels.Dim0(), src_aux_labels.NumElements(),
src_aux_labels.RowSplits(1).Data(), src_aux_labels.values.Data());
k2host::FstInverter inverter(host_fsa, host_aux_labels);
k2host::Array2Size<int32_t> fsa_size, aux_size;
inverter.GetSizes(&fsa_size, &aux_size);
FsaCreator fsa_creator(fsa_size);
k2host::Fsa host_dest_fsa = fsa_creator.GetHostFsa();
Ragged2Creator<int32_t> ragged_creator(aux_size);
k2host::AuxLabels host_dest_aux_labels = ragged_creator.GetHostArray2();
inverter.GetOutput(&host_dest_fsa, &host_dest_aux_labels);
*dest = fsa_creator.GetFsa();
*dest_aux_labels = ragged_creator.GetRagged2();
}
FsaOrVec RemoveEpsilonSelfLoops(FsaOrVec &src,
Array1<int32_t> *arc_map /* = nullptr */) {
NVTX_RANGE(K2_FUNC);
if (src.NumAxes() == 2) {
FsaVec temp = FsaToFsaVec(src);
return RemoveEpsilonSelfLoops(temp, arc_map).RemoveAxis(0);
}
K2_CHECK_EQ(src.NumAxes(), 3);
ContextPtr &c = src.Context();
int32_t num_arcs = src.NumElements();
Renumbering renumber_lists(c, num_arcs);
char *keep_list_data = renumber_lists.Keep().Data();
const Arc *arcs_data = src.values.Data();
K2_EVAL(
c, num_arcs, lambda_set_keep, (int32_t i)->void {
Arc arc = arcs_data[i];
char keep;
if (arc.label == 0 && arc.src_state == arc.dest_state) {
// This arc is an epsilon self-loop, so it should be removed
keep = 0;
} else {
keep = 1;
}
keep_list_data[i] = keep;
});
FsaVec ans = Index(src, 2, renumber_lists.New2Old(), arc_map);
return ans;
}
} // namespace k2
|
ee6ca3bbc383f20c3a6fef1b7219586d2e44cd00.hip | // !!! This is a file automatically generated by hipify!!!
/* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include <algorithm>
#include <iterator>
#include <random>
#include <sstream>
#include <vector>
#include "fastertransformer/cuda/hipcub/hipcub.hpp"
#include "fusion_bart_decoding_op.h"
#include "pd_traits.h"
template <paddle::DataType D>
std::vector<paddle::Tensor> bart_decoding_kernel(
const paddle::Tensor& input,
const paddle::Tensor& memory_sequence_length,
const paddle::Tensor& word_emb,
const std::vector<paddle::Tensor>& self_layernorm_weight,
const std::vector<paddle::Tensor>& self_layernorm_bias,
const std::vector<paddle::Tensor>& self_attn_query_weight,
const std::vector<paddle::Tensor>& self_attn_query_bias,
const std::vector<paddle::Tensor>& self_attn_key_weight,
const std::vector<paddle::Tensor>& self_attn_key_bias,
const std::vector<paddle::Tensor>& self_attn_value_weight,
const std::vector<paddle::Tensor>& self_attn_value_bias,
const std::vector<paddle::Tensor>& self_attn_output_weight,
const std::vector<paddle::Tensor>& self_attn_output_bias,
const std::vector<paddle::Tensor>& cross_layernorm_weight,
const std::vector<paddle::Tensor>& cross_layernorm_bias,
const std::vector<paddle::Tensor>& cross_attn_query_weight,
const std::vector<paddle::Tensor>& cross_attn_query_bias,
const std::vector<paddle::Tensor>& cross_attn_key_weight,
const std::vector<paddle::Tensor>& cross_attn_key_bias,
const std::vector<paddle::Tensor>& cross_attn_value_weight,
const std::vector<paddle::Tensor>& cross_attn_value_bias,
const std::vector<paddle::Tensor>& cross_attn_output_weight,
const std::vector<paddle::Tensor>& cross_attn_output_bias,
const std::vector<paddle::Tensor>& ffn_layernorm_weight,
const std::vector<paddle::Tensor>& ffn_layernorm_bias,
const std::vector<paddle::Tensor>& ffn_intermediate_weight,
const std::vector<paddle::Tensor>& ffn_intermediate_bias,
const std::vector<paddle::Tensor>& ffn_output_weight,
const std::vector<paddle::Tensor>& ffn_output_bias,
const paddle::Tensor& decoder_layernorm_weight,
const paddle::Tensor& decoder_layernorm_bias,
const paddle::Tensor& embedding_weight,
const paddle::Tensor& embedding_bias,
const paddle::Tensor& position_encoding_table,
paddle::Tensor& output_ids,
paddle::Tensor& parent_ids,
paddle::Tensor& sequence_length,
std::string decoding_strategy,
int beam_size,
int topk,
float topp,
int head_num_,
int size_per_head_,
int num_layer_,
int start_id_,
int end_id_,
int64_t max_seq_len_,
float beam_search_diversity_rate_,
float alpha,
hipblasHandle_t cublas_handle_,
hipStream_t stream) {
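  // Map the Paddle tensors onto the DecodingInitParam / per-layer
  // DecoderInitParam structs expected by the FasterTransformer decoding
  // kernels; the pointers set below alias the Paddle tensor memory directly
  // (no copies are made).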
int beam_width_ = (decoding_strategy == "beam_search" ||
decoding_strategy == "beam_search_v2")
? beam_size
: 1;
int candidate_num_ = (decoding_strategy == "topk_sampling" ||
decoding_strategy == "topp_sampling")
? topk
: 1;
float probability_threshold_ = (decoding_strategy == "topk_sampling" ||
decoding_strategy == "topp_sampling")
? topp
: 0.0;
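  // For beam search the inputs arrive tiled by beam width, so the true batch
  // size is input_dims[0] / beam_width_; for the sampling strategies it is
  // input_dims[0] unchanged.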
auto input_dims = input.shape();
int batch_size_ = (decoding_strategy == "beam_search" ||
decoding_strategy == "beam_search_v2")
? input_dims[0] / beam_width_
: input_dims[0];
const int memory_max_seq_len = input_dims[1];
const int memory_hidden_dim = input_dims[2];
const int vocab_size = word_emb.shape()[0];
typedef PDTraits<D> traits_;
typedef typename traits_::DataType DataType_;
typedef typename traits_::data_t data_t_;
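  // DataType_ is the device-side element type used by the FasterTransformer
  // kernels and data_t_ the matching Paddle element type, both derived from
  // the template parameter D via PDTraits.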
DecodingInitParam<DataType_> decoding_params;
decoding_params.cublas_handle = cublas_handle_;
decoding_params.output_ids = output_ids.mutable_data<int>(input.place());
decoding_params.parent_ids = parent_ids.mutable_data<int>(input.place());
decoding_params.sequence_length =
sequence_length.mutable_data<int>(input.place());
typedef DecoderTransformerTraits<traits_::OpType> DecodingTraits_;
decoding_params.stream = stream;
fastertransformer::Allocator<AllocatorType::PD> allocator_(stream);
decoding_params.memory_tensor =
reinterpret_cast<const DataType_*>(input.data<data_t_>());
decoding_params.memory_sequence_length = memory_sequence_length.data<int>();
DecoderInitParam<DataType_>* params =
new DecoderInitParam<DataType_>[num_layer_];
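  // Fill in one DecoderInitParam per decoder layer: the self-attention,
  // cross-attention and feed-forward weights below are raw pointers into the
  // corresponding Paddle tensors, cast to DataType_.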
for (int i = 0; i < num_layer_; i++) {
params[i].stream = stream;
params[i].cublas_handle = cublas_handle_;
// self attn
params[i].self_layernorm.gamma = reinterpret_cast<const DataType_*>(
self_layernorm_weight[i].data<data_t_>());
params[i].self_layernorm.beta = reinterpret_cast<const DataType_*>(
self_layernorm_bias[i].data<data_t_>());
// query
params[i].self_attention.query_weight.kernel =
reinterpret_cast<const DataType_*>(
self_attn_query_weight[i].data<data_t_>());
params[i].self_attention.query_weight.bias =
reinterpret_cast<const DataType_*>(
self_attn_query_bias[i].data<data_t_>());
// key
params[i].self_attention.key_weight.kernel =
reinterpret_cast<const DataType_*>(
self_attn_key_weight[i].data<data_t_>());
params[i].self_attention.key_weight.bias =
reinterpret_cast<const DataType_*>(
self_attn_key_bias[i].data<data_t_>());
// value
params[i].self_attention.value_weight.kernel =
reinterpret_cast<const DataType_*>(
self_attn_value_weight[i].data<data_t_>());
params[i].self_attention.value_weight.bias =
reinterpret_cast<const DataType_*>(
self_attn_value_bias[i].data<data_t_>());
// out proj
params[i].self_attention.attention_output_weight.kernel =
reinterpret_cast<const DataType_*>(
self_attn_output_weight[i].data<data_t_>());
params[i].self_attention.attention_output_weight.bias =
reinterpret_cast<const DataType_*>(
self_attn_output_bias[i].data<data_t_>());
// cross
params[i].cross_layernorm.gamma = reinterpret_cast<const DataType_*>(
cross_layernorm_weight[i].data<data_t_>());
params[i].cross_layernorm.beta = reinterpret_cast<const DataType_*>(
cross_layernorm_bias[i].data<data_t_>());
// query
params[i].cross_attention.query_weight.kernel =
reinterpret_cast<const DataType_*>(
cross_attn_query_weight[i].data<data_t_>());
params[i].cross_attention.query_weight.bias =
reinterpret_cast<const DataType_*>(
cross_attn_query_bias[i].data<data_t_>());
// key
params[i].cross_attention.key_weight.kernel =
reinterpret_cast<const DataType_*>(
cross_attn_key_weight[i].data<data_t_>());
params[i].cross_attention.key_weight.bias =
reinterpret_cast<const DataType_*>(
cross_attn_key_bias[i].data<data_t_>());
// value
params[i].cross_attention.value_weight.kernel =
reinterpret_cast<const DataType_*>(
cross_attn_value_weight[i].data<data_t_>());
params[i].cross_attention.value_weight.bias =
reinterpret_cast<const DataType_*>(
cross_attn_value_bias[i].data<data_t_>());
// out proj
params[i].cross_attention.attention_output_weight.kernel =
reinterpret_cast<const DataType_*>(
cross_attn_output_weight[i].data<data_t_>());
params[i].cross_attention.attention_output_weight.bias =
reinterpret_cast<const DataType_*>(
cross_attn_output_bias[i].data<data_t_>());
// ffn
params[i].ffn_layernorm.gamma = reinterpret_cast<const DataType_*>(
ffn_layernorm_weight[i].data<data_t_>());
params[i].ffn_layernorm.beta = reinterpret_cast<const DataType_*>(
ffn_layernorm_bias[i].data<data_t_>());
// intermediate proj
params[i].ffn.intermediate_weight.kernel =
reinterpret_cast<const DataType_*>(
ffn_intermediate_weight[i].data<data_t_>());
params[i].ffn.intermediate_weight.bias = reinterpret_cast<const DataType_*>(
ffn_intermediate_bias[i].data<data_t_>());
// out proj
params[i].ffn.output_weight.kernel = reinterpret_cast<const DataType_*>(
ffn_output_weight[i].data<data_t_>());
params[i].ffn.output_weight.bias =
reinterpret_cast<const DataType_*>(ffn_output_bias[i].data<data_t_>());
}
decoding_params.layernorm.gamma = reinterpret_cast<const DataType_*>(
decoder_layernorm_weight.data<data_t_>());
decoding_params.layernorm.beta = reinterpret_cast<const DataType_*>(
decoder_layernorm_bias.data<data_t_>());
// for embedding
decoding_params.embedding_table =
reinterpret_cast<const DataType_*>(word_emb.data<data_t_>());
// for weight sharing matmul
decoding_params.embedding_kernel =
reinterpret_cast<const DataType_*>(embedding_weight.data<data_t_>());
// NOTE: the data type of the embedding bias for logits is different
// between decoding with beam search and top-k/top-p sampling in
// Faster Transformer when using float16.
if ("beam_search" == decoding_strategy ||
"beam_search_v2" == decoding_strategy) {
// for matmul bias
decoding_params.embedding_bias =
reinterpret_cast<const float*>(embedding_bias.data<float>());
} else if ("topk_sampling" == decoding_strategy ||
"topp_sampling" == decoding_strategy) {
decoding_params.embedding_bias_T =
reinterpret_cast<const DataType_*>(embedding_bias.data<data_t_>());
}
decoding_params.position_encoding_table = reinterpret_cast<const DataType_*>(
position_encoding_table.data<data_t_>());
if ("beam_search" == decoding_strategy) {
DecodingBeamsearch<DecodingTraits_::OpType>* decoding_beamsearch_;
decoding_beamsearch_ = new DecodingBeamsearch<DecodingTraits_::OpType>(
allocator_,
batch_size_,
beam_width_,
max_seq_len_,
head_num_,
size_per_head_,
vocab_size,
num_layer_,
memory_hidden_dim,
memory_max_seq_len,
start_id_,
end_id_,
beam_search_diversity_rate_,
false,
false,
alpha,
false,
2,
ActivationType::GELU);
decoding_beamsearch_->forward(params, decoding_params);
delete decoding_beamsearch_;
} else if ("beam_search_v2" == decoding_strategy) {
DecodingBeamsearch<DecodingTraits_::OpType>* decoding_beamsearch_;
decoding_beamsearch_ = new DecodingBeamsearch<DecodingTraits_::OpType>(
allocator_,
batch_size_,
beam_width_,
max_seq_len_,
head_num_,
size_per_head_,
vocab_size,
num_layer_,
memory_hidden_dim,
memory_max_seq_len,
start_id_,
end_id_,
beam_search_diversity_rate_,
true, // is_fuse_topk_softMax_
true, // keep_alive_beam_
alpha,
false,
2,
ActivationType::GELU);
decoding_beamsearch_->forward(params, decoding_params);
delete decoding_beamsearch_;
} else if ("topk_sampling" == decoding_strategy ||
"topp_sampling" == decoding_strategy) {
DecodingSampling<DecodingTraits_::OpType>* decoding_sampling_;
decoding_sampling_ =
new DecodingSampling<DecodingTraits_::OpType>(allocator_,
batch_size_,
max_seq_len_,
head_num_,
size_per_head_,
vocab_size,
num_layer_,
memory_hidden_dim,
memory_max_seq_len,
start_id_,
end_id_,
candidate_num_,
probability_threshold_,
false,
2,
ActivationType::GELU);
decoding_sampling_->forward(params, decoding_params);
delete decoding_sampling_;
} else {
PD_THROW(
"Only beam_search, topk_sampling and topp_sampling are supported for "
"Faster Transformer. ");
}
delete[] params;
return {output_ids, parent_ids, sequence_length};
}
std::vector<paddle::Tensor> BartDecodingCUDAForward(
const paddle::Tensor& input,
const paddle::Tensor& mem_seq_len,
const paddle::Tensor& word_embedding,
const std::vector<paddle::Tensor>& self_ln_weight,
const std::vector<paddle::Tensor>& self_ln_bias,
const std::vector<paddle::Tensor>& self_q_weight,
const std::vector<paddle::Tensor>& self_q_bias,
const std::vector<paddle::Tensor>& self_k_weight,
const std::vector<paddle::Tensor>& self_k_bias,
const std::vector<paddle::Tensor>& self_v_weight,
const std::vector<paddle::Tensor>& self_v_bias,
const std::vector<paddle::Tensor>& self_out_weight,
const std::vector<paddle::Tensor>& self_out_bias,
const std::vector<paddle::Tensor>& cross_ln_weight,
const std::vector<paddle::Tensor>& cross_ln_bias,
const std::vector<paddle::Tensor>& cross_q_weight,
const std::vector<paddle::Tensor>& cross_q_bias,
const std::vector<paddle::Tensor>& cross_k_weight,
const std::vector<paddle::Tensor>& cross_k_bias,
const std::vector<paddle::Tensor>& cross_v_weight,
const std::vector<paddle::Tensor>& cross_v_bias,
const std::vector<paddle::Tensor>& cross_out_weight,
const std::vector<paddle::Tensor>& cross_out_bias,
const std::vector<paddle::Tensor>& ffn_ln_weight,
const std::vector<paddle::Tensor>& ffn_ln_bias,
const std::vector<paddle::Tensor>& ffn_inter_weight,
const std::vector<paddle::Tensor>& ffn_inter_bias,
const std::vector<paddle::Tensor>& ffn_out_weight,
const std::vector<paddle::Tensor>& ffn_out_bias,
const paddle::Tensor& decoder_ln_weight,
const paddle::Tensor& decoder_ln_bias,
const paddle::Tensor& embedding_weight,
const paddle::Tensor& embedding_bias,
const paddle::Tensor& positional_embedding_weight,
paddle::Tensor& output_ids,
paddle::Tensor& parent_ids,
paddle::Tensor& sequence_length,
std::string decoding_strategy,
int beam_size,
int topk,
float topp,
int n_head,
int size_per_head,
int num_layer,
int bos_id,
int eos_id,
int64_t max_len,
float beam_search_diversity_rate,
float alpha) {
auto stream = input.stream();
hipblasHandle_t cublas_handle_;
hipblasCreate(&cublas_handle_);
hipblasSetStream(cublas_handle_, stream);
std::vector<paddle::Tensor> ret;
switch (input.type()) {
case paddle::DataType::FLOAT16: {
ret = bart_decoding_kernel<paddle::DataType::FLOAT16>(
input,
mem_seq_len,
word_embedding,
self_ln_weight,
self_ln_bias,
self_q_weight,
self_q_bias,
self_k_weight,
self_k_bias,
self_v_weight,
self_v_bias,
self_out_weight,
self_out_bias,
cross_ln_weight,
cross_ln_bias,
cross_q_weight,
cross_q_bias,
cross_k_weight,
cross_k_bias,
cross_v_weight,
cross_v_bias,
cross_out_weight,
cross_out_bias,
ffn_ln_weight,
ffn_ln_bias,
ffn_inter_weight,
ffn_inter_bias,
ffn_out_weight,
ffn_out_bias,
decoder_ln_weight,
decoder_ln_bias,
embedding_weight,
embedding_bias,
positional_embedding_weight,
output_ids,
parent_ids,
sequence_length,
decoding_strategy,
beam_size,
topk,
topp,
n_head,
size_per_head,
num_layer,
bos_id,
eos_id,
max_len,
beam_search_diversity_rate,
alpha,
cublas_handle_,
stream);
break;
}
case paddle::DataType::FLOAT32: {
ret = bart_decoding_kernel<paddle::DataType::FLOAT32>(
input,
mem_seq_len,
word_embedding,
self_ln_weight,
self_ln_bias,
self_q_weight,
self_q_bias,
self_k_weight,
self_k_bias,
self_v_weight,
self_v_bias,
self_out_weight,
self_out_bias,
cross_ln_weight,
cross_ln_bias,
cross_q_weight,
cross_q_bias,
cross_k_weight,
cross_k_bias,
cross_v_weight,
cross_v_bias,
cross_out_weight,
cross_out_bias,
ffn_ln_weight,
ffn_ln_bias,
ffn_inter_weight,
ffn_inter_bias,
ffn_out_weight,
ffn_out_bias,
decoder_ln_weight,
decoder_ln_bias,
embedding_weight,
embedding_bias,
positional_embedding_weight,
output_ids,
parent_ids,
sequence_length,
decoding_strategy,
beam_size,
topk,
topp,
n_head,
size_per_head,
num_layer,
bos_id,
eos_id,
max_len,
beam_search_diversity_rate,
alpha,
cublas_handle_,
stream);
break;
}
default: {
PD_THROW(
"NOT supported data type. "
"Only float16 and float32 are supported. ");
break;
}
}
hipblasDestroy(cublas_handle_);
return ret;
}
| ee6ca3bbc383f20c3a6fef1b7219586d2e44cd00.cu | /* Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <cuda.h>
#include <cuda_runtime.h>
#include <curand.h>
#include <curand_kernel.h>
#include <algorithm>
#include <iterator>
#include <random>
#include <sstream>
#include <vector>
#include "fastertransformer/cuda/cub/cub.cuh"
#include "fusion_bart_decoding_op.h"
#include "pd_traits.h"
template <paddle::DataType D>
std::vector<paddle::Tensor> bart_decoding_kernel(
const paddle::Tensor& input,
const paddle::Tensor& memory_sequence_length,
const paddle::Tensor& word_emb,
const std::vector<paddle::Tensor>& self_layernorm_weight,
const std::vector<paddle::Tensor>& self_layernorm_bias,
const std::vector<paddle::Tensor>& self_attn_query_weight,
const std::vector<paddle::Tensor>& self_attn_query_bias,
const std::vector<paddle::Tensor>& self_attn_key_weight,
const std::vector<paddle::Tensor>& self_attn_key_bias,
const std::vector<paddle::Tensor>& self_attn_value_weight,
const std::vector<paddle::Tensor>& self_attn_value_bias,
const std::vector<paddle::Tensor>& self_attn_output_weight,
const std::vector<paddle::Tensor>& self_attn_output_bias,
const std::vector<paddle::Tensor>& cross_layernorm_weight,
const std::vector<paddle::Tensor>& cross_layernorm_bias,
const std::vector<paddle::Tensor>& cross_attn_query_weight,
const std::vector<paddle::Tensor>& cross_attn_query_bias,
const std::vector<paddle::Tensor>& cross_attn_key_weight,
const std::vector<paddle::Tensor>& cross_attn_key_bias,
const std::vector<paddle::Tensor>& cross_attn_value_weight,
const std::vector<paddle::Tensor>& cross_attn_value_bias,
const std::vector<paddle::Tensor>& cross_attn_output_weight,
const std::vector<paddle::Tensor>& cross_attn_output_bias,
const std::vector<paddle::Tensor>& ffn_layernorm_weight,
const std::vector<paddle::Tensor>& ffn_layernorm_bias,
const std::vector<paddle::Tensor>& ffn_intermediate_weight,
const std::vector<paddle::Tensor>& ffn_intermediate_bias,
const std::vector<paddle::Tensor>& ffn_output_weight,
const std::vector<paddle::Tensor>& ffn_output_bias,
const paddle::Tensor& decoder_layernorm_weight,
const paddle::Tensor& decoder_layernorm_bias,
const paddle::Tensor& embedding_weight,
const paddle::Tensor& embedding_bias,
const paddle::Tensor& position_encoding_table,
paddle::Tensor& output_ids,
paddle::Tensor& parent_ids,
paddle::Tensor& sequence_length,
std::string decoding_strategy,
int beam_size,
int topk,
float topp,
int head_num_,
int size_per_head_,
int num_layer_,
int start_id_,
int end_id_,
int64_t max_seq_len_,
float beam_search_diversity_rate_,
float alpha,
cublasHandle_t cublas_handle_,
cudaStream_t stream) {
int beam_width_ = (decoding_strategy == "beam_search" ||
decoding_strategy == "beam_search_v2")
? beam_size
: 1;
int candidate_num_ = (decoding_strategy == "topk_sampling" ||
decoding_strategy == "topp_sampling")
? topk
: 1;
float probability_threshold_ = (decoding_strategy == "topk_sampling" ||
decoding_strategy == "topp_sampling")
? topp
: 0.0;
auto input_dims = input.shape();
int batch_size_ = (decoding_strategy == "beam_search" ||
decoding_strategy == "beam_search_v2")
? input_dims[0] / beam_width_
: input_dims[0];
const int memory_max_seq_len = input_dims[1];
const int memory_hidden_dim = input_dims[2];
const int vocab_size = word_emb.shape()[0];
typedef PDTraits<D> traits_;
typedef typename traits_::DataType DataType_;
typedef typename traits_::data_t data_t_;
DecodingInitParam<DataType_> decoding_params;
decoding_params.cublas_handle = cublas_handle_;
decoding_params.output_ids = output_ids.mutable_data<int>(input.place());
decoding_params.parent_ids = parent_ids.mutable_data<int>(input.place());
decoding_params.sequence_length =
sequence_length.mutable_data<int>(input.place());
typedef DecoderTransformerTraits<traits_::OpType> DecodingTraits_;
decoding_params.stream = stream;
fastertransformer::Allocator<AllocatorType::PD> allocator_(stream);
decoding_params.memory_tensor =
reinterpret_cast<const DataType_*>(input.data<data_t_>());
decoding_params.memory_sequence_length = memory_sequence_length.data<int>();
DecoderInitParam<DataType_>* params =
new DecoderInitParam<DataType_>[num_layer_];
for (int i = 0; i < num_layer_; i++) {
params[i].stream = stream;
params[i].cublas_handle = cublas_handle_;
// self attn
params[i].self_layernorm.gamma = reinterpret_cast<const DataType_*>(
self_layernorm_weight[i].data<data_t_>());
params[i].self_layernorm.beta = reinterpret_cast<const DataType_*>(
self_layernorm_bias[i].data<data_t_>());
// query
params[i].self_attention.query_weight.kernel =
reinterpret_cast<const DataType_*>(
self_attn_query_weight[i].data<data_t_>());
params[i].self_attention.query_weight.bias =
reinterpret_cast<const DataType_*>(
self_attn_query_bias[i].data<data_t_>());
// key
params[i].self_attention.key_weight.kernel =
reinterpret_cast<const DataType_*>(
self_attn_key_weight[i].data<data_t_>());
params[i].self_attention.key_weight.bias =
reinterpret_cast<const DataType_*>(
self_attn_key_bias[i].data<data_t_>());
// value
params[i].self_attention.value_weight.kernel =
reinterpret_cast<const DataType_*>(
self_attn_value_weight[i].data<data_t_>());
params[i].self_attention.value_weight.bias =
reinterpret_cast<const DataType_*>(
self_attn_value_bias[i].data<data_t_>());
// out proj
params[i].self_attention.attention_output_weight.kernel =
reinterpret_cast<const DataType_*>(
self_attn_output_weight[i].data<data_t_>());
params[i].self_attention.attention_output_weight.bias =
reinterpret_cast<const DataType_*>(
self_attn_output_bias[i].data<data_t_>());
// cross
params[i].cross_layernorm.gamma = reinterpret_cast<const DataType_*>(
cross_layernorm_weight[i].data<data_t_>());
params[i].cross_layernorm.beta = reinterpret_cast<const DataType_*>(
cross_layernorm_bias[i].data<data_t_>());
// query
params[i].cross_attention.query_weight.kernel =
reinterpret_cast<const DataType_*>(
cross_attn_query_weight[i].data<data_t_>());
params[i].cross_attention.query_weight.bias =
reinterpret_cast<const DataType_*>(
cross_attn_query_bias[i].data<data_t_>());
// key
params[i].cross_attention.key_weight.kernel =
reinterpret_cast<const DataType_*>(
cross_attn_key_weight[i].data<data_t_>());
params[i].cross_attention.key_weight.bias =
reinterpret_cast<const DataType_*>(
cross_attn_key_bias[i].data<data_t_>());
// value
params[i].cross_attention.value_weight.kernel =
reinterpret_cast<const DataType_*>(
cross_attn_value_weight[i].data<data_t_>());
params[i].cross_attention.value_weight.bias =
reinterpret_cast<const DataType_*>(
cross_attn_value_bias[i].data<data_t_>());
// out proj
params[i].cross_attention.attention_output_weight.kernel =
reinterpret_cast<const DataType_*>(
cross_attn_output_weight[i].data<data_t_>());
params[i].cross_attention.attention_output_weight.bias =
reinterpret_cast<const DataType_*>(
cross_attn_output_bias[i].data<data_t_>());
// ffn
params[i].ffn_layernorm.gamma = reinterpret_cast<const DataType_*>(
ffn_layernorm_weight[i].data<data_t_>());
params[i].ffn_layernorm.beta = reinterpret_cast<const DataType_*>(
ffn_layernorm_bias[i].data<data_t_>());
// intermediate proj
params[i].ffn.intermediate_weight.kernel =
reinterpret_cast<const DataType_*>(
ffn_intermediate_weight[i].data<data_t_>());
params[i].ffn.intermediate_weight.bias = reinterpret_cast<const DataType_*>(
ffn_intermediate_bias[i].data<data_t_>());
// out proj
params[i].ffn.output_weight.kernel = reinterpret_cast<const DataType_*>(
ffn_output_weight[i].data<data_t_>());
params[i].ffn.output_weight.bias =
reinterpret_cast<const DataType_*>(ffn_output_bias[i].data<data_t_>());
}
decoding_params.layernorm.gamma = reinterpret_cast<const DataType_*>(
decoder_layernorm_weight.data<data_t_>());
decoding_params.layernorm.beta = reinterpret_cast<const DataType_*>(
decoder_layernorm_bias.data<data_t_>());
// for embedding
decoding_params.embedding_table =
reinterpret_cast<const DataType_*>(word_emb.data<data_t_>());
// for weight sharing matmul
decoding_params.embedding_kernel =
reinterpret_cast<const DataType_*>(embedding_weight.data<data_t_>());
// NOTE: the data type of the embedding bias for logits is different
// between decoding with beam search and top-k/top-p sampling in
// Faster Transformer when using float16.
if ("beam_search" == decoding_strategy ||
"beam_search_v2" == decoding_strategy) {
// for matmul bias
decoding_params.embedding_bias =
reinterpret_cast<const float*>(embedding_bias.data<float>());
} else if ("topk_sampling" == decoding_strategy ||
"topp_sampling" == decoding_strategy) {
decoding_params.embedding_bias_T =
reinterpret_cast<const DataType_*>(embedding_bias.data<data_t_>());
}
decoding_params.position_encoding_table = reinterpret_cast<const DataType_*>(
position_encoding_table.data<data_t_>());
if ("beam_search" == decoding_strategy) {
DecodingBeamsearch<DecodingTraits_::OpType>* decoding_beamsearch_;
decoding_beamsearch_ = new DecodingBeamsearch<DecodingTraits_::OpType>(
allocator_,
batch_size_,
beam_width_,
max_seq_len_,
head_num_,
size_per_head_,
vocab_size,
num_layer_,
memory_hidden_dim,
memory_max_seq_len,
start_id_,
end_id_,
beam_search_diversity_rate_,
false,
false,
alpha,
false,
2,
ActivationType::GELU);
decoding_beamsearch_->forward(params, decoding_params);
delete decoding_beamsearch_;
} else if ("beam_search_v2" == decoding_strategy) {
DecodingBeamsearch<DecodingTraits_::OpType>* decoding_beamsearch_;
decoding_beamsearch_ = new DecodingBeamsearch<DecodingTraits_::OpType>(
allocator_,
batch_size_,
beam_width_,
max_seq_len_,
head_num_,
size_per_head_,
vocab_size,
num_layer_,
memory_hidden_dim,
memory_max_seq_len,
start_id_,
end_id_,
beam_search_diversity_rate_,
true, // is_fuse_topk_softMax_
true, // keep_alive_beam_
alpha,
false,
2,
ActivationType::GELU);
decoding_beamsearch_->forward(params, decoding_params);
delete decoding_beamsearch_;
} else if ("topk_sampling" == decoding_strategy ||
"topp_sampling" == decoding_strategy) {
DecodingSampling<DecodingTraits_::OpType>* decoding_sampling_;
decoding_sampling_ =
new DecodingSampling<DecodingTraits_::OpType>(allocator_,
batch_size_,
max_seq_len_,
head_num_,
size_per_head_,
vocab_size,
num_layer_,
memory_hidden_dim,
memory_max_seq_len,
start_id_,
end_id_,
candidate_num_,
probability_threshold_,
false,
2,
ActivationType::GELU);
decoding_sampling_->forward(params, decoding_params);
delete decoding_sampling_;
} else {
PD_THROW(
"Only beam_search, topk_sampling and topp_sampling are supported for "
"Faster Transformer. ");
}
delete[] params;
return {output_ids, parent_ids, sequence_length};
}
std::vector<paddle::Tensor> BartDecodingCUDAForward(
const paddle::Tensor& input,
const paddle::Tensor& mem_seq_len,
const paddle::Tensor& word_embedding,
const std::vector<paddle::Tensor>& self_ln_weight,
const std::vector<paddle::Tensor>& self_ln_bias,
const std::vector<paddle::Tensor>& self_q_weight,
const std::vector<paddle::Tensor>& self_q_bias,
const std::vector<paddle::Tensor>& self_k_weight,
const std::vector<paddle::Tensor>& self_k_bias,
const std::vector<paddle::Tensor>& self_v_weight,
const std::vector<paddle::Tensor>& self_v_bias,
const std::vector<paddle::Tensor>& self_out_weight,
const std::vector<paddle::Tensor>& self_out_bias,
const std::vector<paddle::Tensor>& cross_ln_weight,
const std::vector<paddle::Tensor>& cross_ln_bias,
const std::vector<paddle::Tensor>& cross_q_weight,
const std::vector<paddle::Tensor>& cross_q_bias,
const std::vector<paddle::Tensor>& cross_k_weight,
const std::vector<paddle::Tensor>& cross_k_bias,
const std::vector<paddle::Tensor>& cross_v_weight,
const std::vector<paddle::Tensor>& cross_v_bias,
const std::vector<paddle::Tensor>& cross_out_weight,
const std::vector<paddle::Tensor>& cross_out_bias,
const std::vector<paddle::Tensor>& ffn_ln_weight,
const std::vector<paddle::Tensor>& ffn_ln_bias,
const std::vector<paddle::Tensor>& ffn_inter_weight,
const std::vector<paddle::Tensor>& ffn_inter_bias,
const std::vector<paddle::Tensor>& ffn_out_weight,
const std::vector<paddle::Tensor>& ffn_out_bias,
const paddle::Tensor& decoder_ln_weight,
const paddle::Tensor& decoder_ln_bias,
const paddle::Tensor& embedding_weight,
const paddle::Tensor& embedding_bias,
const paddle::Tensor& positional_embedding_weight,
paddle::Tensor& output_ids,
paddle::Tensor& parent_ids,
paddle::Tensor& sequence_length,
std::string decoding_strategy,
int beam_size,
int topk,
float topp,
int n_head,
int size_per_head,
int num_layer,
int bos_id,
int eos_id,
int64_t max_len,
float beam_search_diversity_rate,
float alpha) {
auto stream = input.stream();
cublasHandle_t cublas_handle_;
cublasCreate(&cublas_handle_);
cublasSetStream(cublas_handle_, stream);
std::vector<paddle::Tensor> ret;
switch (input.type()) {
case paddle::DataType::FLOAT16: {
ret = bart_decoding_kernel<paddle::DataType::FLOAT16>(
input,
mem_seq_len,
word_embedding,
self_ln_weight,
self_ln_bias,
self_q_weight,
self_q_bias,
self_k_weight,
self_k_bias,
self_v_weight,
self_v_bias,
self_out_weight,
self_out_bias,
cross_ln_weight,
cross_ln_bias,
cross_q_weight,
cross_q_bias,
cross_k_weight,
cross_k_bias,
cross_v_weight,
cross_v_bias,
cross_out_weight,
cross_out_bias,
ffn_ln_weight,
ffn_ln_bias,
ffn_inter_weight,
ffn_inter_bias,
ffn_out_weight,
ffn_out_bias,
decoder_ln_weight,
decoder_ln_bias,
embedding_weight,
embedding_bias,
positional_embedding_weight,
output_ids,
parent_ids,
sequence_length,
decoding_strategy,
beam_size,
topk,
topp,
n_head,
size_per_head,
num_layer,
bos_id,
eos_id,
max_len,
beam_search_diversity_rate,
alpha,
cublas_handle_,
stream);
break;
}
case paddle::DataType::FLOAT32: {
ret = bart_decoding_kernel<paddle::DataType::FLOAT32>(
input,
mem_seq_len,
word_embedding,
self_ln_weight,
self_ln_bias,
self_q_weight,
self_q_bias,
self_k_weight,
self_k_bias,
self_v_weight,
self_v_bias,
self_out_weight,
self_out_bias,
cross_ln_weight,
cross_ln_bias,
cross_q_weight,
cross_q_bias,
cross_k_weight,
cross_k_bias,
cross_v_weight,
cross_v_bias,
cross_out_weight,
cross_out_bias,
ffn_ln_weight,
ffn_ln_bias,
ffn_inter_weight,
ffn_inter_bias,
ffn_out_weight,
ffn_out_bias,
decoder_ln_weight,
decoder_ln_bias,
embedding_weight,
embedding_bias,
positional_embedding_weight,
output_ids,
parent_ids,
sequence_length,
decoding_strategy,
beam_size,
topk,
topp,
n_head,
size_per_head,
num_layer,
bos_id,
eos_id,
max_len,
beam_search_diversity_rate,
alpha,
cublas_handle_,
stream);
break;
}
default: {
PD_THROW(
"NOT supported data type. "
"Only float16 and float32 are supported. ");
break;
}
}
cublasDestroy(cublas_handle_);
return ret;
}
|
e4305ad6cffd9fab6b2e585108465378b436a1b1.hip | // !!! This is a file automatically generated by hipify!!!
#include <hip/hip_runtime.h>
#include <cstdlib>
#include <iostream>
#include <time.h>
#include "CudaPhysics.cuh"
#include "CudaKernels.cuh"
void ConstructMatrixOfInfluenceCoefficientsCuda(
const float *h_cp_x,
const float *h_cp_y,
const float *h_cp_z,
const float *h_n_x,
const float *h_n_y,
const float *h_n_z,
const float *h_vs_x,
const float *h_vs_y,
const float *h_vs_z,
const float *h_ve_x,
const float *h_ve_y,
const float *h_ve_z,
float *h_A, // Output influence coefficient matrix.
int noOfUnknownVortexStrengths, // This is basically the number of surface panels.
float RankineCoreRadius,
char rankineAlgorithmIndex,
int FrameNumber)
{
// DOM: Called from ManageCalculationOfMatrixOfCoefficients in ITPhysics.cpp.
hipError_t err; // Error code to check return values for CUDA calls.
// DOM: Calculate the sizes of the arrays passed in to this function.
int totalNumberOfCudaComputations = noOfUnknownVortexStrengths*noOfUnknownVortexStrengths; // This is the number of elements in the A matrix, and accounts for the influence of each panel on each panel.
size_t sizeRowFloat = noOfUnknownVortexStrengths * sizeof(float); // Memory required for a row of floats.
size_t sizeMatrixFloat = totalNumberOfCudaComputations * sizeof(float); // Memory required for a matrix of floats.
int maxNoOfVortices = 4;
// ============================================================================
// Allocate the GPU memory.
// ============================================================================
	// Collocation point coordinates.
float *d_cp_x = NULL;
err = hipMalloc((void **)&d_cp_x, sizeRowFloat); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_cp_y = NULL;
err = hipMalloc((void **)&d_cp_y, sizeRowFloat); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_cp_z = NULL;
err = hipMalloc((void **)&d_cp_z, sizeRowFloat); if (err != hipSuccess) { exit(EXIT_FAILURE); }
// Panel Normals.
float *d_n_x = NULL;
err = hipMalloc((void **)&d_n_x, sizeRowFloat); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_n_y = NULL;
err = hipMalloc((void **)&d_n_y, sizeRowFloat); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_n_z = NULL;
err = hipMalloc((void **)&d_n_z, sizeRowFloat); if (err != hipSuccess) { exit(EXIT_FAILURE); }
// Vortex end point coordinates.
float *d_vs_x = NULL;
err = hipMalloc((void **)&d_vs_x, sizeRowFloat*maxNoOfVortices); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_vs_y = NULL;
err = hipMalloc((void **)&d_vs_y, sizeRowFloat*maxNoOfVortices); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_vs_z = NULL;
err = hipMalloc((void **)&d_vs_z, sizeRowFloat*maxNoOfVortices); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_ve_x = NULL;
err = hipMalloc((void **)&d_ve_x, sizeRowFloat*maxNoOfVortices); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_ve_y = NULL;
err = hipMalloc((void **)&d_ve_y, sizeRowFloat*maxNoOfVortices); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_ve_z = NULL;
err = hipMalloc((void **)&d_ve_z, sizeRowFloat*maxNoOfVortices); if (err != hipSuccess) { exit(EXIT_FAILURE); }
// The memory for the square matrix of influence coefficient entries.
float *d_A = NULL;
err = hipMalloc((void **)&d_A, sizeMatrixFloat); if (err != hipSuccess) { exit(EXIT_FAILURE); }
// ============================================================================
// Copy host memory to device memory.
// ============================================================================
err = hipMemcpy(d_cp_x, h_cp_x, sizeRowFloat, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_cp_y, h_cp_y, sizeRowFloat, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_cp_z, h_cp_z, sizeRowFloat, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_n_x, h_n_x, sizeRowFloat, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_n_y, h_n_y, sizeRowFloat, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_n_z, h_n_z, sizeRowFloat, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_vs_x, h_vs_x, sizeRowFloat*maxNoOfVortices, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_vs_y, h_vs_y, sizeRowFloat*maxNoOfVortices, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_vs_z, h_vs_z, sizeRowFloat*maxNoOfVortices, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_ve_x, h_ve_x, sizeRowFloat*maxNoOfVortices, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_ve_y, h_ve_y, sizeRowFloat*maxNoOfVortices, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_ve_z, h_ve_z, sizeRowFloat*maxNoOfVortices, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_A, h_A, sizeMatrixFloat, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
// ============================================================================
// Call the Kernel.
// ============================================================================
int noOfElementsInEachThread = 1;
int totalNumberOfThreads = (totalNumberOfCudaComputations + noOfElementsInEachThread - 1) / noOfElementsInEachThread;
	int threadsPerBlock = 256; // 256; // When running out of resources, try reducing the threadsPerBlock.
int totalNumberOfBlocks = (totalNumberOfThreads + threadsPerBlock - 1) / threadsPerBlock;
int noOfBlocksX = 64;
int noOfBlocksY = (totalNumberOfBlocks + noOfBlocksX - 1) / noOfBlocksX;
dim3 grid(noOfBlocksX, noOfBlocksY, 1);
dim3 block(threadsPerBlock, 1, 1);
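	// Worked sizing example (illustrative numbers, not from the original code):
	// with noOfUnknownVortexStrengths = 2000 panels, totalNumberOfCudaComputations
	// = 2000*2000 = 4,000,000; with 256 threads per block that is 15,625 blocks,
	// laid out as a 64 x 245 grid (64*245*256 = 4,014,080 threads >= 4,000,000).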
// Call noOfBlocksX*noOfBlocksY*threadsPerBlock instances of the kernel.
kernelInfluenceCoefficient << <grid, block >> > (
d_cp_x,
d_cp_y,
d_cp_z,
d_n_x,
d_n_y,
d_n_z,
d_vs_x,
d_vs_y,
d_vs_z,
d_ve_x,
d_ve_y,
d_ve_z,
d_A,
noOfUnknownVortexStrengths,
rankineAlgorithmIndex,
RankineCoreRadius,
FrameNumber);
// Synchronize the CUDA kernels.
hipDeviceSynchronize();
// Deal with any errors.
err = hipGetLastError();
if (err != hipSuccess)
{
std::cout << "Failed to launch kernelInfluenceCoefficient kernel (error code " << hipGetErrorString(err) << ")" << std::endl;
exit(EXIT_FAILURE);
}
// ====================================================================================================
// Copy the coefficient vector back from the GPU device.
// Copy the device result vector in device memory to the host result vector in host memory.
// ====================================================================================================
err = hipMemcpy(h_A, d_A, sizeMatrixFloat, hipMemcpyDeviceToHost);
// ============================================================================
// Free the GPU memory.
// ============================================================================
err = hipFree(d_cp_x); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_cp_y); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_cp_z); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_n_x); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_n_y); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_n_z); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_vs_x); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_vs_y); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_vs_z); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_ve_x); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_ve_y); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_ve_z); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_A); if (err != hipSuccess) { exit(EXIT_FAILURE); }
}
void ComputeVelocitiesForBatchOfPointsCuda(
const float *h_cp_x,
const float *h_cp_y,
const float *h_cp_z,
const float *h_vs_x,
const float *h_vs_y,
const float *h_vs_z,
const float *h_ve_x,
const float *h_ve_y,
const float *h_ve_z,
float *h_cp_vx,
float *h_cp_vy,
float *h_cp_vz,
const float *h_vorticities,
int noOfVorticesPerPanel,
int noOfSubjectPanels,
int noOfVelocityPredictions,
int rankineAlgorithmIndex)
{
// DOM: Error code to check return values for CUDA calls.
hipError_t err;
// DOM: Calculate the sizes of the arrays passed in to this function.
size_t sizeSubjectPanelsFloat = noOfSubjectPanels * sizeof(float); // Memory required for a row of floats.
size_t sizeVelocityPredictionsFloat = noOfVelocityPredictions * sizeof(float); // Memory required for noOfVelocityPredictions floats.
// ============================================================================
// Allocate the GPU memory.
// ============================================================================
// Object point coordinates.
float *d_cp_x = NULL;
err = hipMalloc((void **)&d_cp_x, sizeVelocityPredictionsFloat); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_cp_y = NULL;
err = hipMalloc((void **)&d_cp_y, sizeVelocityPredictionsFloat); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_cp_z = NULL;
err = hipMalloc((void **)&d_cp_z, sizeVelocityPredictionsFloat); if (err != hipSuccess) { exit(EXIT_FAILURE); }
// Vortex end point coordinates.
float *d_vs_x = NULL;
err = hipMalloc((void **)&d_vs_x, sizeSubjectPanelsFloat*noOfVorticesPerPanel); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_vs_y = NULL;
err = hipMalloc((void **)&d_vs_y, sizeSubjectPanelsFloat*noOfVorticesPerPanel); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_vs_z = NULL;
err = hipMalloc((void **)&d_vs_z, sizeSubjectPanelsFloat*noOfVorticesPerPanel); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_ve_x = NULL;
err = hipMalloc((void **)&d_ve_x, sizeSubjectPanelsFloat*noOfVorticesPerPanel); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_ve_y = NULL;
err = hipMalloc((void **)&d_ve_y, sizeSubjectPanelsFloat*noOfVorticesPerPanel); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_ve_z = NULL;
err = hipMalloc((void **)&d_ve_z, sizeSubjectPanelsFloat*noOfVorticesPerPanel); if (err != hipSuccess) { exit(EXIT_FAILURE); }
// The memory for the object point velocities.
float *d_cp_vx = NULL;
err = hipMalloc((void **)&d_cp_vx, sizeVelocityPredictionsFloat); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_cp_vy = NULL;
err = hipMalloc((void **)&d_cp_vy, sizeVelocityPredictionsFloat); if (err != hipSuccess) { exit(EXIT_FAILURE); }
float *d_cp_vz = NULL;
err = hipMalloc((void **)&d_cp_vz, sizeVelocityPredictionsFloat); if (err != hipSuccess) { exit(EXIT_FAILURE); }
// The memory for the panel vorticities
float *d_vorticities = NULL;
err = hipMalloc((void **)&d_vorticities, sizeSubjectPanelsFloat); if (err != hipSuccess) { exit(EXIT_FAILURE); }
// ============================================================================
// Copy host memory to device memory.
// ============================================================================
err = hipMemcpy(d_cp_x, h_cp_x, sizeVelocityPredictionsFloat, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_cp_y, h_cp_y, sizeVelocityPredictionsFloat, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_cp_z, h_cp_z, sizeVelocityPredictionsFloat, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_vs_x, h_vs_x, sizeSubjectPanelsFloat*noOfVorticesPerPanel, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_vs_y, h_vs_y, sizeSubjectPanelsFloat*noOfVorticesPerPanel, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_vs_z, h_vs_z, sizeSubjectPanelsFloat*noOfVorticesPerPanel, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_ve_x, h_ve_x, sizeSubjectPanelsFloat*noOfVorticesPerPanel, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_ve_y, h_ve_y, sizeSubjectPanelsFloat*noOfVorticesPerPanel, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_ve_z, h_ve_z, sizeSubjectPanelsFloat*noOfVorticesPerPanel, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_cp_vx, h_cp_vx, sizeVelocityPredictionsFloat, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_cp_vy, h_cp_vy, sizeVelocityPredictionsFloat, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_cp_vz, h_cp_vz, sizeVelocityPredictionsFloat, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipMemcpy(d_vorticities, h_vorticities, sizeSubjectPanelsFloat, hipMemcpyHostToDevice); if (err != hipSuccess) { exit(EXIT_FAILURE); }
// =========================================================================================
// Call the Kernel.
// =========================================================================================
int noOfElementsInEachThread = 1; // The number of object point array elements computed by each instance of the kernel function.
int totalNumberOfThreads = (noOfVelocityPredictions + noOfElementsInEachThread - 1) / noOfElementsInEachThread;
// TODO: Temporarily reduce threadsPerBlock to 1 to see if it helps with cuPrint.
	int threadsPerBlock = 1; // 256; // When running out of resources, try reducing the threadsPerBlock.
int totalNumberOfBlocks = (totalNumberOfThreads + threadsPerBlock - 1) / threadsPerBlock;
int noOfBlocksX = 64;
int noOfBlocksY = (totalNumberOfBlocks + noOfBlocksX - 1) / noOfBlocksX;
dim3 grid(noOfBlocksX, noOfBlocksY, 1);
dim3 block(threadsPerBlock, 1, 1);
// Initialize tranche variables for tranche execution.
int threadsPerTranche = 6000;
int noOfTranches = (noOfVelocityPredictions + threadsPerTranche - 1) / threadsPerTranche;
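	// Worked example (illustrative): for noOfVelocityPredictions = 20,000 and
	// threadsPerTranche = 6,000, noOfTranches = 4; the last tranche only covers
	// 2,000 points, with the excess threads presumably filtered inside the
	// kernel via noOfVelocityPredictions and indexOfStartOfTranche.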
for (int trancheIndex = 0; trancheIndex < noOfTranches; trancheIndex++)
{
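		// Busy-wait for roughly 10 ms worth of clock() ticks before launching this
		// tranche, presumably to throttle successive kernel launches.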
clock_t time_end;
time_end = clock() + 10 * CLOCKS_PER_SEC / 1000;
while (clock() < time_end)
{
}
// Sort out tranche start index.
int indexOfStartOfTranche = trancheIndex*threadsPerTranche;
// Call noOfBlocksX*noOfBlocksY*threadsPerBlock instances of the kernel.
kernelFunctionPredictVelocityAtPoint << <grid, block >> > (
d_cp_x,
d_cp_y,
d_cp_z,
d_vs_x,
d_vs_y,
d_vs_z,
d_ve_x,
d_ve_y,
d_ve_z,
d_cp_vx,
d_cp_vy,
d_cp_vz,
d_vorticities,
noOfVorticesPerPanel,
noOfSubjectPanels,
noOfElementsInEachThread, // Usually set to 1.
noOfVelocityPredictions,
threadsPerTranche,
indexOfStartOfTranche,
rankineAlgorithmIndex);
hipDeviceSynchronize();
// Deal with any errors.
err = hipGetLastError();
if (err != hipSuccess)
{
std::cout << "Failed to launch kernelFunctionPredictVelocityAtPoint kernel (error code " << hipGetErrorString(err) << ")" << std::endl;
exit(EXIT_FAILURE);
}
} // End of for tranches.
err = hipMemcpy(h_cp_vx, d_cp_vx, sizeVelocityPredictionsFloat, hipMemcpyDeviceToHost);
err = hipMemcpy(h_cp_vy, d_cp_vy, sizeVelocityPredictionsFloat, hipMemcpyDeviceToHost);
err = hipMemcpy(h_cp_vz, d_cp_vz, sizeVelocityPredictionsFloat, hipMemcpyDeviceToHost);
// Free the GPU memory.
err = hipFree(d_cp_x); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_cp_y); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_cp_z); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_vs_x); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_vs_y); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_vs_z); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_ve_x); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_ve_y); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_ve_z); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_cp_vx); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_cp_vy); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_cp_vz); if (err != hipSuccess) { exit(EXIT_FAILURE); }
err = hipFree(d_vorticities); if (err != hipSuccess) { exit(EXIT_FAILURE); }
} // End of ComputeVelocitiesForBatchOfPointsCuda. | e4305ad6cffd9fab6b2e585108465378b436a1b1.cu | #include <cuda_runtime.h>
#include <cstdlib>
#include <iostream>
#include <time.h>
#include "CudaPhysics.cuh"
#include "CudaKernels.cuh"
void ConstructMatrixOfInfluenceCoefficientsCuda(
const float *h_cp_x,
const float *h_cp_y,
const float *h_cp_z,
const float *h_n_x,
const float *h_n_y,
const float *h_n_z,
const float *h_vs_x,
const float *h_vs_y,
const float *h_vs_z,
const float *h_ve_x,
const float *h_ve_y,
const float *h_ve_z,
float *h_A, // Output influence coefficient matrix.
int noOfUnknownVortexStrengths, // This is basically the number of surface panels.
float RankineCoreRadius,
char rankineAlgorithmIndex,
int FrameNumber)
{
// DOM: Called from ManageCalculationOfMatrixOfCoefficients in ITPhysics.cpp.
cudaError_t err; // Error code to check return values for CUDA calls.
// DOM: Calculate the sizes of the arrays passed in to this function.
int totalNumberOfCudaComputations = noOfUnknownVortexStrengths*noOfUnknownVortexStrengths; // This is the number of elements in the A matrix, and accounts for the influence of each panel on each panel.
size_t sizeRowFloat = noOfUnknownVortexStrengths * sizeof(float); // Memory required for a row of floats.
size_t sizeMatrixFloat = totalNumberOfCudaComputations * sizeof(float); // Memory required for a matrix of floats.
int maxNoOfVortices = 4;
// ============================================================================
// Allocate the GPU memory.
// ============================================================================
	// Collocation point coordinates.
float *d_cp_x = NULL;
err = cudaMalloc((void **)&d_cp_x, sizeRowFloat); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_cp_y = NULL;
err = cudaMalloc((void **)&d_cp_y, sizeRowFloat); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_cp_z = NULL;
err = cudaMalloc((void **)&d_cp_z, sizeRowFloat); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
// Panel Normals.
float *d_n_x = NULL;
err = cudaMalloc((void **)&d_n_x, sizeRowFloat); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_n_y = NULL;
err = cudaMalloc((void **)&d_n_y, sizeRowFloat); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_n_z = NULL;
err = cudaMalloc((void **)&d_n_z, sizeRowFloat); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
// Vortex end point coordinates.
float *d_vs_x = NULL;
err = cudaMalloc((void **)&d_vs_x, sizeRowFloat*maxNoOfVortices); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_vs_y = NULL;
err = cudaMalloc((void **)&d_vs_y, sizeRowFloat*maxNoOfVortices); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_vs_z = NULL;
err = cudaMalloc((void **)&d_vs_z, sizeRowFloat*maxNoOfVortices); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_ve_x = NULL;
err = cudaMalloc((void **)&d_ve_x, sizeRowFloat*maxNoOfVortices); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_ve_y = NULL;
err = cudaMalloc((void **)&d_ve_y, sizeRowFloat*maxNoOfVortices); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_ve_z = NULL;
err = cudaMalloc((void **)&d_ve_z, sizeRowFloat*maxNoOfVortices); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
// The memory for the square matrix of influence coefficient entries.
float *d_A = NULL;
err = cudaMalloc((void **)&d_A, sizeMatrixFloat); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
// ============================================================================
// Copy host memory to device memory.
// ============================================================================
err = cudaMemcpy(d_cp_x, h_cp_x, sizeRowFloat, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_cp_y, h_cp_y, sizeRowFloat, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_cp_z, h_cp_z, sizeRowFloat, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_n_x, h_n_x, sizeRowFloat, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_n_y, h_n_y, sizeRowFloat, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_n_z, h_n_z, sizeRowFloat, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_vs_x, h_vs_x, sizeRowFloat*maxNoOfVortices, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_vs_y, h_vs_y, sizeRowFloat*maxNoOfVortices, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_vs_z, h_vs_z, sizeRowFloat*maxNoOfVortices, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_ve_x, h_ve_x, sizeRowFloat*maxNoOfVortices, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_ve_y, h_ve_y, sizeRowFloat*maxNoOfVortices, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_ve_z, h_ve_z, sizeRowFloat*maxNoOfVortices, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_A, h_A, sizeMatrixFloat, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
// ============================================================================
// Call the Kernel.
// ============================================================================
int noOfElementsInEachThread = 1;
int totalNumberOfThreads = (totalNumberOfCudaComputations + noOfElementsInEachThread - 1) / noOfElementsInEachThread;
	int threadsPerBlock = 256; // 256; // When running out of resources, try reducing the threadsPerBlock.
int totalNumberOfBlocks = (totalNumberOfThreads + threadsPerBlock - 1) / threadsPerBlock;
int noOfBlocksX = 64;
int noOfBlocksY = (totalNumberOfBlocks + noOfBlocksX - 1) / noOfBlocksX;
dim3 grid(noOfBlocksX, noOfBlocksY, 1);
dim3 block(threadsPerBlock, 1, 1);
// Call noOfBlocksX*noOfBlocksY*threadsPerBlock instances of the kernel.
kernelInfluenceCoefficient << <grid, block >> > (
d_cp_x,
d_cp_y,
d_cp_z,
d_n_x,
d_n_y,
d_n_z,
d_vs_x,
d_vs_y,
d_vs_z,
d_ve_x,
d_ve_y,
d_ve_z,
d_A,
noOfUnknownVortexStrengths,
rankineAlgorithmIndex,
RankineCoreRadius,
FrameNumber);
// Synchronize the CUDA kernels.
cudaDeviceSynchronize();
// Deal with any errors.
err = cudaGetLastError();
if (err != cudaSuccess)
{
std::cout << "Failed to launch kernelInfluenceCoefficient kernel (error code " << cudaGetErrorString(err) << ")" << std::endl;
exit(EXIT_FAILURE);
}
// ====================================================================================================
// Copy the coefficient vector back from the GPU device.
// Copy the device result vector in device memory to the host result vector in host memory.
// ====================================================================================================
err = cudaMemcpy(h_A, d_A, sizeMatrixFloat, cudaMemcpyDeviceToHost);
// ============================================================================
// Free the GPU memory.
// ============================================================================
err = cudaFree(d_cp_x); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_cp_y); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_cp_z); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_n_x); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_n_y); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_n_z); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_vs_x); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_vs_y); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_vs_z); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_ve_x); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_ve_y); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_ve_z); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_A); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
}
void ComputeVelocitiesForBatchOfPointsCuda(
const float *h_cp_x,
const float *h_cp_y,
const float *h_cp_z,
const float *h_vs_x,
const float *h_vs_y,
const float *h_vs_z,
const float *h_ve_x,
const float *h_ve_y,
const float *h_ve_z,
float *h_cp_vx,
float *h_cp_vy,
float *h_cp_vz,
const float *h_vorticities,
int noOfVorticesPerPanel,
int noOfSubjectPanels,
int noOfVelocityPredictions,
int rankineAlgorithmIndex)
{
// DOM: Error code to check return values for CUDA calls.
cudaError_t err;
// DOM: Calculate the sizes of the arrays passed in to this function.
size_t sizeSubjectPanelsFloat = noOfSubjectPanels * sizeof(float); // Memory required for a row of floats.
size_t sizeVelocityPredictionsFloat = noOfVelocityPredictions * sizeof(float); // Memory required for noOfVelocityPredictions floats.
// ============================================================================
// Allocate the GPU memory.
// ============================================================================
// Object point coordinates.
float *d_cp_x = NULL;
err = cudaMalloc((void **)&d_cp_x, sizeVelocityPredictionsFloat); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_cp_y = NULL;
err = cudaMalloc((void **)&d_cp_y, sizeVelocityPredictionsFloat); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_cp_z = NULL;
err = cudaMalloc((void **)&d_cp_z, sizeVelocityPredictionsFloat); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
// Vortex end point coordinates.
float *d_vs_x = NULL;
err = cudaMalloc((void **)&d_vs_x, sizeSubjectPanelsFloat*noOfVorticesPerPanel); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_vs_y = NULL;
err = cudaMalloc((void **)&d_vs_y, sizeSubjectPanelsFloat*noOfVorticesPerPanel); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_vs_z = NULL;
err = cudaMalloc((void **)&d_vs_z, sizeSubjectPanelsFloat*noOfVorticesPerPanel); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_ve_x = NULL;
err = cudaMalloc((void **)&d_ve_x, sizeSubjectPanelsFloat*noOfVorticesPerPanel); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_ve_y = NULL;
err = cudaMalloc((void **)&d_ve_y, sizeSubjectPanelsFloat*noOfVorticesPerPanel); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_ve_z = NULL;
err = cudaMalloc((void **)&d_ve_z, sizeSubjectPanelsFloat*noOfVorticesPerPanel); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
// The memory for the object point velocities.
float *d_cp_vx = NULL;
err = cudaMalloc((void **)&d_cp_vx, sizeVelocityPredictionsFloat); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_cp_vy = NULL;
err = cudaMalloc((void **)&d_cp_vy, sizeVelocityPredictionsFloat); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
float *d_cp_vz = NULL;
err = cudaMalloc((void **)&d_cp_vz, sizeVelocityPredictionsFloat); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
// The memory for the panel vorticities
float *d_vorticities = NULL;
err = cudaMalloc((void **)&d_vorticities, sizeSubjectPanelsFloat); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
// ============================================================================
// Copy host memory to device memory.
// ============================================================================
err = cudaMemcpy(d_cp_x, h_cp_x, sizeVelocityPredictionsFloat, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_cp_y, h_cp_y, sizeVelocityPredictionsFloat, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_cp_z, h_cp_z, sizeVelocityPredictionsFloat, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_vs_x, h_vs_x, sizeSubjectPanelsFloat*noOfVorticesPerPanel, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_vs_y, h_vs_y, sizeSubjectPanelsFloat*noOfVorticesPerPanel, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_vs_z, h_vs_z, sizeSubjectPanelsFloat*noOfVorticesPerPanel, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_ve_x, h_ve_x, sizeSubjectPanelsFloat*noOfVorticesPerPanel, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_ve_y, h_ve_y, sizeSubjectPanelsFloat*noOfVorticesPerPanel, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_ve_z, h_ve_z, sizeSubjectPanelsFloat*noOfVorticesPerPanel, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_cp_vx, h_cp_vx, sizeVelocityPredictionsFloat, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_cp_vy, h_cp_vy, sizeVelocityPredictionsFloat, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_cp_vz, h_cp_vz, sizeVelocityPredictionsFloat, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaMemcpy(d_vorticities, h_vorticities, sizeSubjectPanelsFloat, cudaMemcpyHostToDevice); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
// =========================================================================================
// Call the Kernel.
// =========================================================================================
int noOfElementsInEachThread = 1; // The number of object point array elements computed by each instance of the kernel function.
int totalNumberOfThreads = (noOfVelocityPredictions + noOfElementsInEachThread - 1) / noOfElementsInEachThread;
// TODO: Temporarily reduce threadsPerBlock to 1 to see if it helps with cuPrint.
	int threadsPerBlock = 1; // 256; // When running out of resources, try reducing the threadsPerBlock.
int totalNumberOfBlocks = (totalNumberOfThreads + threadsPerBlock - 1) / threadsPerBlock;
int noOfBlocksX = 64;
int noOfBlocksY = (totalNumberOfBlocks + noOfBlocksX - 1) / noOfBlocksX;
dim3 grid(noOfBlocksX, noOfBlocksY, 1);
dim3 block(threadsPerBlock, 1, 1);
// Initialize tranche variables for tranche execution.
int threadsPerTranche = 6000;
int noOfTranches = (noOfVelocityPredictions + threadsPerTranche - 1) / threadsPerTranche;
for (int trancheIndex = 0; trancheIndex < noOfTranches; trancheIndex++)
{
clock_t time_end;
time_end = clock() + 10 * CLOCKS_PER_SEC / 1000;
while (clock() < time_end)
{
}
// Sort out tranche start index.
int indexOfStartOfTranche = trancheIndex*threadsPerTranche;
// Call noOfBlocksX*noOfBlocksY*threadsPerBlock instances of the kernel.
kernelFunctionPredictVelocityAtPoint << <grid, block >> > (
d_cp_x,
d_cp_y,
d_cp_z,
d_vs_x,
d_vs_y,
d_vs_z,
d_ve_x,
d_ve_y,
d_ve_z,
d_cp_vx,
d_cp_vy,
d_cp_vz,
d_vorticities,
noOfVorticesPerPanel,
noOfSubjectPanels,
noOfElementsInEachThread, // Usually set to 1.
noOfVelocityPredictions,
threadsPerTranche,
indexOfStartOfTranche,
rankineAlgorithmIndex);
cudaDeviceSynchronize();
// Deal with any errors.
err = cudaGetLastError();
if (err != cudaSuccess)
{
std::cout << "Failed to launch kernelFunctionPredictVelocityAtPoint kernel (error code " << cudaGetErrorString(err) << ")" << std::endl;
exit(EXIT_FAILURE);
}
} // End of for tranches.
err = cudaMemcpy(h_cp_vx, d_cp_vx, sizeVelocityPredictionsFloat, cudaMemcpyDeviceToHost);
err = cudaMemcpy(h_cp_vy, d_cp_vy, sizeVelocityPredictionsFloat, cudaMemcpyDeviceToHost);
err = cudaMemcpy(h_cp_vz, d_cp_vz, sizeVelocityPredictionsFloat, cudaMemcpyDeviceToHost);
// Free the GPU memory.
err = cudaFree(d_cp_x); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_cp_y); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_cp_z); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_vs_x); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_vs_y); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_vs_z); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_ve_x); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_ve_y); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_ve_z); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_cp_vx); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_cp_vy); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_cp_vz); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
err = cudaFree(d_vorticities); if (err != cudaSuccess) { exit(EXIT_FAILURE); }
} // End of ComputeVelocitiesForBatchOfPointsCuda. |
e7df993abe279de09ea8161190844feefb0403ad.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "GraphicEngineCUDA.cuh"
#include "__graphic_engine_/GraphicEngine.h"
#include<iostream>
void GraphicEngine::AllocateVertex2D() {
unsigned int vertexs2d_size = data_info_.numberOfVertexs * sizeof(Vertex2D);
hipMalloc((void**)& device_vertexs_2d_, vertexs2d_size);
hipHostMalloc((void**)& host_vertexs_2d_, vertexs2d_size);
};
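// Transform each vertex into camera space (dot products with the camera basis vectors) and apply a perspective projection with projection-plane distance k.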
__global__ void ProjectVertexs(const Vertex3D* const vertexs_3d, Vertex2D* const vertexs_2d, const unsigned int number_of_vertexs, const Vertex3D cameraPosition, const Vector3D vectorX, const Vector3D vectorY, const Vector3D vectorZ, const float k) {
int threadIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (threadIndex < number_of_vertexs) {
//Offset
float offset_x = vertexs_3d[threadIndex].x - cameraPosition.x;
float offset_y = vertexs_3d[threadIndex].y - cameraPosition.y;
float offset_z = vertexs_3d[threadIndex].z - cameraPosition.z;
float new_x = offset_x * vectorX.x + offset_y * vectorX.y + offset_z * vectorX.z;
float new_y = offset_x * vectorY.x + offset_y * vectorY.y + offset_z * vectorY.z;
float new_z = offset_x * vectorZ.x + offset_y * vectorZ.y + offset_z * vectorZ.z;
vertexs_2d[threadIndex].x = ( k * new_x / (new_z + k) ) ;
vertexs_2d[threadIndex].y = ( k * new_y / (new_z + k) ) ;
}
}
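// Map the projected coordinates to pixel space: scale by a hard-coded factor of 1000, center on the display, and flip the y axis.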
__global__ void ConvertInDisplayCoordinats(Vertex2D* const vertexs_2d, const unsigned int number_of_vertexs, const unsigned int display_width, const unsigned int display_height) {
int threadIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (threadIndex < number_of_vertexs) {
vertexs_2d[threadIndex].x = vertexs_2d[threadIndex].x * 1000.0f + display_width / 2;
vertexs_2d[threadIndex].y = display_height / 2 - vertexs_2d[threadIndex].y * 1000.0f;
}
}
__global__ void DrawLines(const Vertex2D* const vertexs_2d, const Polygon3D* const device_polygons, const Normal3D* normals, const unsigned int number_of_polygons, RgbPixel* const display_buffer, const unsigned int display_width, const unsigned int display_height, Vector3D camera_dir) {
int threadIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (threadIndex < number_of_polygons * 3) {
const unsigned int polygon_number = threadIndex / 3;
const unsigned int vertex_number = threadIndex % 3;
Normal3D normal = normals[ device_polygons[polygon_number].ratios[vertex_number].normalNumber];
float scalar = camera_dir.x * normal.x + camera_dir.y * normal.y + camera_dir.z * normal.z;
//if (scalar <= 0.0f)
{
const unsigned int first_vertex_index = device_polygons[polygon_number].ratios[vertex_number].vertexNumber;
const unsigned int second_vertex_number = ((vertex_number + 1) < 3) ? (vertex_number + 1) : 0;
const unsigned int second_vertex_index = device_polygons[polygon_number].ratios[second_vertex_number].vertexNumber;
int x1 = vertexs_2d[first_vertex_index].x;
int y1 = vertexs_2d[first_vertex_index].y;
const int x2 = vertexs_2d[second_vertex_index].x;
const int y2 = vertexs_2d[second_vertex_index].y;
const bool coordinats_are_correct = (x1 > 0 && x1 < display_width) && (x2 > 0 && x2 < display_width) && (y1 > 0 && y1 < display_height) && (y2 > 0 && y2 < display_height);
if (coordinats_are_correct) {
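// Rasterize the edge from (x1, y1) to (x2, y2) in red using Bresenham's line algorithm.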
const int deltaX = abs(x2 - x1);
const int deltaY = abs(y2 - y1);
const int signX = x1 < x2 ? 1 : -1;
const int signY = y1 < y2 ? 1 : -1;
//
int error = deltaX - deltaY;
display_buffer[display_width * y2 + x2].rgb_reserved = 0;
display_buffer[display_width * y2 + x2].rgb_red = 255;
display_buffer[display_width * y2 + x2].rgb_green = 0;
display_buffer[display_width * y2 + x2].rgb_blue = 0;
while (x1 != x2 || y1 != y2)
{
display_buffer[display_width * y1 + x1].rgb_reserved = 0;
display_buffer[display_width * y1 + x1].rgb_red = 255;
display_buffer[display_width * y1 + x1].rgb_green = 0;
display_buffer[display_width * y1 + x1].rgb_blue = 0;
const int error2 = error * 2;
//
if (error2 > -deltaY)
{
error -= deltaY;
x1 += signX;
}
if (error2 < deltaX)
{
error += deltaX;
y1 += signY;
}
}
}
}
}
}
__global__ void SetScreenColor(RgbPixel* const device_display_buffer, const RgbPixel rgb_pixel, const unsigned int number_of_pixels) {
int thread_index = threadIdx.x + blockIdx.x * blockDim.x;
if (thread_index < number_of_pixels) device_display_buffer[thread_index] = rgb_pixel;
}
void GraphicEngine::SetDisplayBufferColor(const RgbColor& rgb_color)
{
const RgbPixel rgb_pixel = { rgb_color.rgb_blue, rgb_color.rgb_green, rgb_color.rgb_red, rgb_color.rgb_reserved };
const unsigned int number_of_threads = 1024;
const unsigned int number_of_blocks = (1920 * 1080 * sizeof(RgbPixel) + number_of_threads - 1) / number_of_threads;
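// Note: using the byte count (1920 * 1080 * sizeof(RgbPixel)) launches several times more blocks than there are pixels; the bounds check inside SetScreenColor keeps the extra threads harmless.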
hipLaunchKernelGGL(( SetScreenColor) , dim3(number_of_blocks), dim3(number_of_threads) , 0, 0, this->device_display_buffer_, rgb_pixel, 1920 * 1080);
}
void CPUCountingProjectVertexs(const Vertex3D* const vertexs_3d, Vertex2D* const vertexs_2d, const unsigned int number_of_vertexs, const Vertex3D cameraPosition, const Vector3D vectorX, const Vector3D vectorY, const Vector3D vectorZ, const float k) {
for (size_t i = 0; i < number_of_vertexs; i++)
{
//Offset
const float offset_x = vertexs_3d[i].x - cameraPosition.x;
const float offset_y = vertexs_3d[i].y - cameraPosition.y;
const float offset_z = vertexs_3d[i].z - cameraPosition.z;
const float new_x = offset_x * vectorX.x + offset_y * vectorX.y + offset_z * vectorX.z;
const float new_y = offset_x * vectorY.x + offset_y * vectorY.y + offset_z * vectorY.z;
const float new_z = offset_x * vectorZ.x + offset_y * vectorZ.y + offset_z * vectorZ.z;
vertexs_2d[i].x = (k * new_x / (new_z + k));
vertexs_2d[i].y = (k * new_y / (new_z + k));
}
}
void CPUCountingConvertInDisplayCoordinats(Vertex2D* const vertexs_2d, const unsigned int number_of_vertexs, const unsigned int display_width, const unsigned int display_height) {
for (size_t i = 0; i < number_of_vertexs; i++)
{
vertexs_2d[i].x = vertexs_2d[i].x * (float) 1000 + display_width / 2;
vertexs_2d[i].y = display_height / 2 - vertexs_2d[i].y * 1000;
}
}
void CPUCountingDrawLines(const Vertex2D* const vertexs_2d, const Polygon3D* const device_polygons, const unsigned int number_of_polygons, RgbPixel* const display_buffer, const unsigned int display_width, const unsigned int display_height) {
{
const unsigned int number_of_lines = number_of_polygons * 3;
for (size_t i = 0; i < number_of_lines; i++)
{
const unsigned int polygon_number = i / 3;
const unsigned int vertex_number = i % 3;
const unsigned int first_vertex_index = device_polygons[polygon_number].ratios[vertex_number].vertexNumber;
const unsigned int second_vertex_number = ((vertex_number + 1) < 3) ? (vertex_number + 1) : 0;
const unsigned int second_vertex_index = device_polygons[polygon_number].ratios[second_vertex_number].vertexNumber;
int x1 = vertexs_2d[first_vertex_index].x;
int y1 = vertexs_2d[first_vertex_index].y;
int x2 = vertexs_2d[second_vertex_index].x;
int y2 = vertexs_2d[second_vertex_index].y;
const bool coordinats_are_correct = (x1 > 0 && x1 < display_width) && (x2 > 0 && x2 < display_width) && (y1 > 0 && y1 < display_height) && (y2 > 0 && y2 < display_height);
if (coordinats_are_correct) {
const int deltaX = abs(x2 - x1);
const int deltaY = abs(y2 - y1);
const int signX = x1 < x2 ? 1 : -1;
const int signY = y1 < y2 ? 1 : -1;
//
int error = deltaX - deltaY;
display_buffer[display_width * y2 + x2].rgb_reserved = 0;
display_buffer[display_width * y2 + x2].rgb_red = 255;
display_buffer[display_width * y2 + x2].rgb_green = 0;
display_buffer[display_width * y2 + x2].rgb_blue = 0;
/*display_buffer[display_width * y1 + x1].rgb_reserved = 0;
display_buffer[display_width * y1 + x1].rgb_red = 255;
display_buffer[display_width * y1 + x1].rgb_green = 0;
display_buffer[display_width * y1 + x1].rgb_blue = 0;*/
while (x1 != x2 || y1 != y2)
{
display_buffer[display_width * y1 + x1].rgb_reserved = 0;
display_buffer[display_width * y1 + x1].rgb_red = 255;
display_buffer[display_width * y1 + x1].rgb_green = 0;
display_buffer[display_width * y1 + x1].rgb_blue = 0;
const int error2 = error * 2;
//
if (error2 > -deltaY)
{
error -= deltaY;
x1 += signX;
}
if (error2 < deltaX)
{
error += deltaX;
y1 += signY;
}
}
}
}
}
}
__global__ void draw(RgbPixel* display_buffer) {
int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
display_buffer[thread_id].rgb_green = 255;
}
CameraInfo GraphicEngine::GetCameraInfo() {
CameraInfo info;
info.camera_pos = (*camera_->GetPosition());
info.dis_proj_plane = camera_->GetDistanceToProjPlane();
info.vector_x = camera_->GetVectorX();
info.vector_y = camera_->GetVectorY();
info.vector_z = camera_->GetDirection();
return info;
}
void GraphicEngine::TestFunction()
{
}
void GraphicEngine::CreateMeshFrame() {
const Vertex3D camera_position = *(camera_->GetPosition());
const Vector3D vector_x = camera_->GetVectorX();
const Vector3D vector_y = camera_->GetVectorY();
const Vector3D vector_z = camera_->GetDirection();
const float distance_to_projection_plane = camera_->GetDistanceToProjPlane();
const Vertex3D* const device_vertexs_3d = device_data_.deviceVertexs;
Vertex2D* const device_vertexs_2d = device_vertexs_2d_;
const Polygon3D* const device_polygons = device_data_.devicePolygons;
Normal3D* device_normals = device_data_.deviceNormals;
RgbColor color;
color.rgb_blue = 20;
color.rgb_green = 255;
color.rgb_red = 0;
const unsigned int number_of_threads = 1024;
unsigned int number_of_blocks = (data_info_.numberOfVertexs + number_of_threads - 1) / number_of_threads;
SetDisplayBufferColor(color);
hipLaunchKernelGGL(( ProjectVertexs) , dim3(number_of_blocks), dim3(number_of_threads), 0, 0, device_vertexs_3d, device_vertexs_2d, data_info_.numberOfVertexs, camera_position, vector_x, vector_y, vector_z, distance_to_projection_plane);
hipLaunchKernelGGL(( ConvertInDisplayCoordinats) , dim3(number_of_blocks), dim3(number_of_threads) , 0, 0, device_vertexs_2d, data_info_.numberOfVertexs, display_width_, display_height_);
number_of_blocks = (data_info_.numberOfPolygons * 3 + number_of_threads - 1) / number_of_threads;
hipLaunchKernelGGL(( DrawLines) , dim3(number_of_blocks), dim3(number_of_threads) , 0, 0, device_vertexs_2d, device_polygons, device_normals, data_info_.numberOfPolygons, device_display_buffer_, display_width_, display_height_,vector_z);
//hipMemcpy((void**)host_display_buffer_, device_display_buffer_, display_buffer_size_, hipMemcpyDeviceToHost);
//const Vertex3D* const host_vertexs_3d = data_info_.allVertexs;
//hipMemcpy(host_display_buffer_, device_display_buffer_, size_of_display_buffer_, hipMemcpyDeviceToHost);
//CPUCountingProjectVertexs(host_vertexs_3d, this->host_vertexs_2d_, this->data_info_.numberOfVertexs, camera_position, vector_x, vector_y, vector_z, distance_to_projection_plane);
//CPUCountingConvertInDisplayCoordinats(host_vertexs_2d_, this->data_info_.numberOfVertexs, display_width_, display_height_);
//Polygon3D* host_polygons = this->data_info_.allPolygons;
//CPUCountingDrawLines(host_vertexs_2d_, host_polygons, this->data_info_.numberOfPolygons, this->host_display_buffer_, display_width_, display_height_);
//hipMemcpy(host_display_buffer_, device_display_buffer_, size_of_display_buffer_, hipMemcpyDeviceToHost);
}
struct Proj_vertex {
float x;
float y;
float _z;
};
inline __device__ void swap(Proj_vertex& a, Proj_vertex& b) {
Proj_vertex temporary = b;
b = a;
a = temporary;
}
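// Half-plane test: true when the pixel lies on the same side of the triangle edge (anchored at triangle_vertex) as the supplied normal.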
inline __device__ bool InPositiveHalfPlane(const Vertex2D& pixel, const Vertex2D& triangle_vertex, Vector2D& _normal) {
Vector2D pixel_vector;
pixel_vector.x = pixel.x - triangle_vertex.x;
pixel_vector.y = pixel.y - triangle_vertex.y;
Vector2D normal = _normal;
float length_n = sqrt(normal.x * normal.x + normal.y * normal.y);
normal.x /= length_n;
normal.y /= length_n;
float length_p = sqrt(pixel_vector.x * pixel_vector.x + pixel_vector.y * pixel_vector.y);
pixel_vector.x /= length_p;
pixel_vector.y /=length_p;
float scalar = pixel_vector.x * normal.x + pixel_vector.y * normal.y;
if (scalar >= 0.0f) return true;
else
return false;
}
struct InfoForPainting {
Vertex3D* d_vertexs;
Polygon3D* d_polygons;
unsigned int number_of_polygons;
//RgbPixel* d_rgb;
//unsigned int number_of_colors;
unsigned int threads_per_triangle;
unsigned int screen_width;
unsigned int screen_height;
};
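// Linear interpolation: returns the value at ya given the sample I1 at y1 and I2 at y2.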
__device__ float Interpolate(float y1, float I1, float y2, float I2, float ya) {
return I1 * ((ya - y2)/(y1 - y2)) + I2 * ((y1 - ya)/(y1 - y2));
}
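// Rasterizes each triangle with threads_per_triangle cooperating threads: pixels of the bounding box are kept if they pass all three half-plane tests, and 1/z is interpolated per pixel (the z-buffer compare is currently commented out).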
__global__ void DrawPolygons(z_mutex* z_buffer, RgbPixel* display_buffer, Vertex2D* vertexs_2d, InfoForPainting info) {
int thread_index = threadIdx.x + blockDim.x * blockIdx.x;
if (thread_index < info.number_of_polygons * info.threads_per_triangle) {
//if (thread_index % info.threads_per_triangle == 0) {
unsigned int thread_ = (thread_index % info.threads_per_triangle);
Polygon3D polygon = info.d_polygons[thread_index / info.threads_per_triangle];//<---
//printf("%d", thread_index);
Proj_vertex proj_vertexs[3];
for (int i = 0; i < 3; i++)
{
proj_vertexs[i].x = vertexs_2d[polygon.ratios[i].vertexNumber].x;
//printf("x: %2f", proj_vertexs[i].x);
proj_vertexs[i].y = vertexs_2d[polygon.ratios[i].vertexNumber].y;
//printf("y: %2f \n", proj_vertexs[i].y);
proj_vertexs[i]._z = 1.0f / info.d_vertexs[polygon.ratios[i].vertexNumber].z;
}
int min_x = 10000, min_y = 10000, max_x = -1, max_y = -1;
for (int i = 0; i < 3; i++)
{
if (proj_vertexs[i].x < min_x) min_x = floor(proj_vertexs[i].x);
if (proj_vertexs[i].y < min_y) min_y = floor(proj_vertexs[i].y);
if (proj_vertexs[i].x > max_x) max_x = ceil(proj_vertexs[i].x);
if (proj_vertexs[i].y > max_y) max_y = ceil(proj_vertexs[i].y);
}
RgbPixel polygon_color = polygon.color;//info.d_rgb[thread_index ];
//printf("r: %f g: %f b: %f \n", polygon_color.rgb_red, polygon_color.rgb_green, polygon_color.rgb_blue);
//polygon_color.rgb_blue = 0;
//polygon_color.rgb_green = 0;
//polygon_color.rgb_red = 255;
//Enforce a consistent (clockwise) winding for the half-plane tests: swap vertices 1 and 2 if the cross product indicates the opposite orientation
Vertex2D AToB;
AToB.x = proj_vertexs[1].x - proj_vertexs[0].x;
AToB.y = proj_vertexs[1].y - proj_vertexs[0].y;
Vertex2D BToC;
BToC.x = proj_vertexs[2].x - proj_vertexs[1].x;
BToC.y = proj_vertexs[2].y - proj_vertexs[1].y;
float crossz = AToB.x * BToC.y - AToB.y * BToC.x;
if (crossz > 0.0f)
{
Proj_vertex temporary = proj_vertexs[2];
proj_vertexs[2] = proj_vertexs[1];
proj_vertexs[1] = temporary;
}
float length;
Vector2D bot_mid = { proj_vertexs[1].y - proj_vertexs[0].y, -proj_vertexs[1].x + proj_vertexs[0].x };
length = sqrt(bot_mid.x * bot_mid.x + bot_mid.y * bot_mid.y);
bot_mid.x /= length;
bot_mid.y /= length;
Vector2D mid_top = { proj_vertexs[2].y - proj_vertexs[1].y, -proj_vertexs[2].x + proj_vertexs[1].x };
length = sqrt(mid_top.x * mid_top.x + mid_top.y * mid_top.y);
mid_top.x /= length;
mid_top.y /= length;
Vector2D top_bot = { proj_vertexs[0].y - proj_vertexs[2].y, -proj_vertexs[0].x + proj_vertexs[2].x, };
length = sqrt(top_bot.x * top_bot.x + top_bot.y * top_bot.y);
top_bot.x /= length;
top_bot.y /= length;
const Vertex2D bot = { proj_vertexs[0].x, proj_vertexs[0].y };
const Vertex2D mid = { proj_vertexs[1].x, proj_vertexs[1].y };
const Vertex2D top = { proj_vertexs[2].x, proj_vertexs[2].y };
//printf("bot: %2f, %2f, mid: %2f %2f, top: %2f %2f \n", bot.x, bot.y,mid.x, mid.y,top.x, top.y);
//printf("bot_mid: %2f %2f, mid_top: %2f %2f, top_bot: %2f %2f \n", bot_mid.x, bot_mid.y, mid_top.x, mid_top.y, top_bot.x, top_bot.y);
unsigned int delta_y = max_y - min_y;
unsigned int delta_x = max_x - min_x;
unsigned int index = thread_;
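// Each of the threads_per_triangle threads walks the bounding box with a stride of threads_per_triangle; because of the integer division below, up to threads_per_triangle-1 trailing pixels of the box are never visited.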
for (int i = 0; i < (delta_y * delta_x) / info.threads_per_triangle; i++)
{
unsigned int x = (index % delta_x) + min_x;
unsigned int y = (index / delta_x) + min_y;
Vertex2D pixel;
pixel.x = ((float)x + 0.5f);
pixel.y = ((float)y + 0.5f);
bool PixelInTriangle = InPositiveHalfPlane(pixel, bot, bot_mid) && InPositiveHalfPlane(pixel, mid, mid_top) && InPositiveHalfPlane(pixel, top, top_bot);
if (PixelInTriangle) {
Proj_vertex v[3];
for (int i = 0; i < 3; i++) v[i] = proj_vertexs[i];
//printf("Before %f %f %f \n", p_vertexs[0].y, p_vertexs[1].y, p_vertexs[2].y);
if (v[0].y < v[1].y) swap(v[0], v[1]);
if (v[1].y < v[2].y) swap(v[1], v[2]);
if (v[0].y < v[1].y) swap(v[0], v[1]);
float I1 = v[0]._z, I2 = v[1]._z, I3 = v[2]._z;
float X1 = v[0].x, X2 = v[1].x, X3 = v[2].x;
float Xa, Xb;
float Ia, Ib, Ip;
if (pixel.y > v[1].y) {
Ia = Interpolate(v[0].y, I1, v[1].y, I2, pixel.y);
Xa = Interpolate(v[0].y, v[0].x, v[1].y, v[1].x, pixel.y);
}
else {
Ia = Interpolate(v[2].y, I3, v[1].y, I2, pixel.y);
Xa = Interpolate(v[2].y, v[2].x, v[1].y, v[1].x, pixel.y);
}
Ib = Interpolate(v[0].y, I1, v[2].y, I3, pixel.y);
Xb = Interpolate(v[0].y, v[0].x, v[2].y, v[2].x, pixel.y);
Ip = Interpolate(Xa, Ia, Xb, Ib, pixel.x);
float& pixel_z = Ip;
//printf("after %f %f %f \n", p_vertexs[0].y, p_vertexs[1].y, p_vertexs[2].y);
//while ((z_buffer + 1920 * y + x)->mutex == true) continue;
//(z_buffer + 1920 * y + x)->mutex = true;
//printf("delta_x:%d delta_y:%d x: %d y:%d\n", delta_x, delta_y, x, y);
//printf("r: %d, g:%d, b:%d \n", polygon_color.rgb_red, polygon_color.rgb_green, polygon_color.rgb_blue);
//printf("%f \n", pixel_z);
//if (1.0f / (z_buffer + 1920 * y + x)->z > pixel_z) {
//RgbPixel color = {100, 200, 50, 0};
*(display_buffer + 1920 * y + x) = polygon_color;// color;// polygon_color;
//}
//(z_buffer + 1920 * y + x)->mutex = false;
}
index += info.threads_per_triangle;
}
//for (int y = min_y; y < max_y; y++)
// for (int x = min_x; x < max_x; x++)
// {
// Vertex2D pixel;
// pixel.x = ((float)x + 0.5f);
// pixel.y = ((float)y + 0.5f);
//
// bool PixelInTriangle = InPositiveHalfPlane(pixel, bot, bot_mid) && InPositiveHalfPlane(pixel, mid, mid_top) && InPositiveHalfPlane(pixel, top, top_bot);
// if (PixelInTriangle) {
// Proj_vertex v[3];
// for (int i = 0; i < 3; i++) v[i] = proj_vertexs[i];
// //printf("Before %f %f %f \n", p_vertexs[0].y, p_vertexs[1].y, p_vertexs[2].y);
// if (v[0].y < v[1].y) swap(v[0], v[1]);
// if (v[1].y < v[2].y) swap(v[1], v[2]);
// if (v[0].y < v[1].y) swap(v[0], v[1]);
// float I1 = v[0]._z, I2 = v[1]._z, I3 = v[2]._z;
// float X1 = v[0].x, X2 = v[1].x, X3 = v[2].x;
// float Xa, Xb;
// float Ia, Ib, Ip;
// if (pixel.y > v[1].y) {
// Ia = Interpolate(v[0].y, I1, v[1].y, I2, pixel.y);
// Xa = Interpolate(v[0].y, v[0].x, v[1].y, v[1].x, pixel.y);
// }
// else {
// Ia = Interpolate(v[2].y, I3, v[1].y, I2, pixel.y);
// Xa = Interpolate(v[2].y, v[2].x, v[1].y, v[1].x, pixel.y);
// }
// Ib = Interpolate(v[0].y, I1, v[2].y, I3, pixel.y);
// Xb = Interpolate(v[0].y, v[0].x, v[2].y, v[2].x, pixel.y);
// Ip = Interpolate(Xa, Ia, Xb, Ib, pixel.x);
// float& pixel_z = Ip;
// //printf("after %f %f %f \n", p_vertexs[0].y, p_vertexs[1].y, p_vertexs[2].y);
//
// while ( (z_buffer + 1920 * y + x)->mutex == true ) continue;
// (z_buffer + 1920 * y + x)->mutex = true;
// if (1.0f / (z_buffer + 1920 * y + x)->z > pixel_z) {
// * (display_buffer + 1920 * y + x) = polygon_color;
// }
// (z_buffer + 1920 * y + x)->mutex = false;
// }
//
// }
}
}
void GraphicEngine::CreateFlatFrame() {
const Vertex3D camera_position = *(camera_->GetPosition());
const Vector3D vector_x = camera_->GetVectorX();
const Vector3D vector_y = camera_->GetVectorY();
const Vector3D vector_z = camera_->GetDirection();
const float distance_to_projection_plane = camera_->GetDistanceToProjPlane();
Vertex3D* const device_vertexs_3d = device_data_.deviceVertexs;
Vertex2D* const device_vertexs_2d = device_vertexs_2d_;
Polygon3D* const device_polygons = device_data_.devicePolygons;
RgbColor color;
color.rgb_blue = 255;
color.rgb_green = 255;
color.rgb_red = 255;
hipMemset(z_mutex_, 0, display_width_ * display_height_ * sizeof(z_mutex));
const unsigned int number_of_threads = 1024;
unsigned int number_of_blocks = (data_info_.numberOfVertexs + number_of_threads - 1) / number_of_threads;
SetDisplayBufferColor(color);
hipLaunchKernelGGL(( ProjectVertexs) , dim3(number_of_blocks), dim3(number_of_threads) , 0, 0, device_vertexs_3d, device_vertexs_2d, data_info_.numberOfVertexs, camera_position, vector_x, vector_y, vector_z, distance_to_projection_plane);
hipLaunchKernelGGL(( ConvertInDisplayCoordinats) , dim3(number_of_blocks), dim3(number_of_threads) , 0, 0, device_vertexs_2d, data_info_.numberOfVertexs, display_width_, display_height_);
number_of_blocks = (data_info_.numberOfPolygons * threads_per_triangle_ + number_of_threads - 1) / number_of_threads;
InfoForPainting info = { device_vertexs_3d, device_polygons, data_info_.numberOfPolygons, /*(RgbPixel*)device_data_.device_colors, data_info_.numberOfRgbColors,*/ threads_per_triangle_, display_width_, display_height_ };
hipLaunchKernelGGL(( DrawPolygons) , dim3(number_of_blocks), dim3(number_of_threads) , 0, 0, z_mutex_, device_display_buffer_, device_vertexs_2d, info);
//system("pause");
}
| e7df993abe279de09ea8161190844feefb0403ad.cu |
#include "GraphicEngineCUDA.cuh"
#include "__graphic_engine_/GraphicEngine.h"
#include<iostream>
void GraphicEngine::AllocateVertex2D() {
unsigned int vertexs2d_size = data_info_.numberOfVertexs * sizeof(Vertex2D);
cudaMalloc((void**)& device_vertexs_2d_, vertexs2d_size);
cudaMallocHost((void**)& host_vertexs_2d_, vertexs2d_size);
};
__global__ void ProjectVertexs(const Vertex3D* const vertexs_3d, Vertex2D* const vertexs_2d, const unsigned int number_of_vertexs, const Vertex3D cameraPosition, const Vector3D vectorX, const Vector3D vectorY, const Vector3D vectorZ, const float k) {
int threadIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (threadIndex < number_of_vertexs) {
//Offset
float offset_x = vertexs_3d[threadIndex].x - cameraPosition.x;
float offset_y = vertexs_3d[threadIndex].y - cameraPosition.y;
float offset_z = vertexs_3d[threadIndex].z - cameraPosition.z;
float new_x = offset_x * vectorX.x + offset_y * vectorX.y + offset_z * vectorX.z;
float new_y = offset_x * vectorY.x + offset_y * vectorY.y + offset_z * vectorY.z;
float new_z = offset_x * vectorZ.x + offset_y * vectorZ.y + offset_z * vectorZ.z;
vertexs_2d[threadIndex].x = ( k * new_x / (new_z + k) ) ;
vertexs_2d[threadIndex].y = ( k * new_y / (new_z + k) ) ;
}
}
__global__ void ConvertInDisplayCoordinats(Vertex2D* const vertexs_2d, const unsigned int number_of_vertexs, const unsigned int display_width, const unsigned int display_height) {
int threadIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (threadIndex < number_of_vertexs) {
vertexs_2d[threadIndex].x = vertexs_2d[threadIndex].x * 1000.0f + display_width / 2;
vertexs_2d[threadIndex].y = display_height / 2 - vertexs_2d[threadIndex].y * 1000.0f;
}
}
__global__ void DrawLines(const Vertex2D* const vertexs_2d, const Polygon3D* const device_polygons, const Normal3D* normals, const unsigned int number_of_polygons, RgbPixel* const display_buffer, const unsigned int display_width, const unsigned int display_height, Vector3D camera_dir) {
int threadIndex = threadIdx.x + blockIdx.x * blockDim.x;
if (threadIndex < number_of_polygons * 3) {
const unsigned int polygon_number = threadIndex / 3;
const unsigned int vertex_number = threadIndex % 3;
Normal3D normal = normals[ device_polygons[polygon_number].ratios[vertex_number].normalNumber];
float scalar = camera_dir.x * normal.x + camera_dir.y * normal.y + camera_dir.z * normal.z;
//if (scalar <= 0.0f)
{
const unsigned int first_vertex_index = device_polygons[polygon_number].ratios[vertex_number].vertexNumber;
const unsigned int second_vertex_number = ((vertex_number + 1) < 3) ? (vertex_number + 1) : 0;
const unsigned int second_vertex_index = device_polygons[polygon_number].ratios[second_vertex_number].vertexNumber;
int x1 = vertexs_2d[first_vertex_index].x;
int y1 = vertexs_2d[first_vertex_index].y;
const int x2 = vertexs_2d[second_vertex_index].x;
const int y2 = vertexs_2d[second_vertex_index].y;
const bool coordinats_are_correct = (x1 > 0 && x1 < display_width) && (x2 > 0 && x2 < display_width) && (y1 > 0 && y1 < display_height) && (y2 > 0 && y2 < display_height);
if (coordinats_are_correct) {
const int deltaX = abs(x2 - x1);
const int deltaY = abs(y2 - y1);
const int signX = x1 < x2 ? 1 : -1;
const int signY = y1 < y2 ? 1 : -1;
//
int error = deltaX - deltaY;
display_buffer[display_width * y2 + x2].rgb_reserved = 0;
display_buffer[display_width * y2 + x2].rgb_red = 255;
display_buffer[display_width * y2 + x2].rgb_green = 0;
display_buffer[display_width * y2 + x2].rgb_blue = 0;
while (x1 != x2 || y1 != y2)
{
display_buffer[display_width * y1 + x1].rgb_reserved = 0;
display_buffer[display_width * y1 + x1].rgb_red = 255;
display_buffer[display_width * y1 + x1].rgb_green = 0;
display_buffer[display_width * y1 + x1].rgb_blue = 0;
const int error2 = error * 2;
//
if (error2 > -deltaY)
{
error -= deltaY;
x1 += signX;
}
if (error2 < deltaX)
{
error += deltaX;
y1 += signY;
}
}
}
}
}
}
__global__ void SetScreenColor(RgbPixel* const device_display_buffer, const RgbPixel rgb_pixel, const unsigned int number_of_pixels) {
int thread_index = threadIdx.x + blockIdx.x * blockDim.x;
if (thread_index < number_of_pixels) device_display_buffer[thread_index] = rgb_pixel;
}
void GraphicEngine::SetDisplayBufferColor(const RgbColor& rgb_color)
{
const RgbPixel rgb_pixel = { rgb_color.rgb_blue, rgb_color.rgb_green, rgb_color.rgb_red, rgb_color.rgb_reserved };
const unsigned int number_of_threads = 1024;
const unsigned int number_of_blocks = (1920 * 1080 * sizeof(RgbPixel) + number_of_threads - 1) / number_of_threads;
SetScreenColor <<< number_of_blocks, number_of_threads >>> (this->device_display_buffer_, rgb_pixel, 1920 * 1080);
}
void CPUCountingProjectVertexs(const Vertex3D* const vertexs_3d, Vertex2D* const vertexs_2d, const unsigned int number_of_vertexs, const Vertex3D cameraPosition, const Vector3D vectorX, const Vector3D vectorY, const Vector3D vectorZ, const float k) {
for (size_t i = 0; i < number_of_vertexs; i++)
{
//Offset
const float offset_x = vertexs_3d[i].x - cameraPosition.x;
const float offset_y = vertexs_3d[i].y - cameraPosition.y;
const float offset_z = vertexs_3d[i].z - cameraPosition.z;
const float new_x = offset_x * vectorX.x + offset_y * vectorX.y + offset_z * vectorX.z;
const float new_y = offset_x * vectorY.x + offset_y * vectorY.y + offset_z * vectorY.z;
const float new_z = offset_x * vectorZ.x + offset_y * vectorZ.y + offset_z * vectorZ.z;
vertexs_2d[i].x = (k * new_x / (new_z + k));
vertexs_2d[i].y = (k * new_y / (new_z + k));
}
}
void CPUCountingConvertInDisplayCoordinats(Vertex2D* const vertexs_2d, const unsigned int number_of_vertexs, const unsigned int display_width, const unsigned int display_height) {
for (size_t i = 0; i < number_of_vertexs; i++)
{
vertexs_2d[i].x = vertexs_2d[i].x * (float) 1000 + display_width / 2;
vertexs_2d[i].y = display_height / 2 - vertexs_2d[i].y * 1000;
}
}
void CPUCountingDrawLines(const Vertex2D* const vertexs_2d, const Polygon3D* const device_polygons, const unsigned int number_of_polygons, RgbPixel* const display_buffer, const unsigned int display_width, const unsigned int display_height) {
{
const unsigned int number_of_lines = number_of_polygons * 3;
for (size_t i = 0; i < number_of_lines; i++)
{
const unsigned int polygon_number = i / 3;
const unsigned int vertex_number = i % 3;
const unsigned int first_vertex_index = device_polygons[polygon_number].ratios[vertex_number].vertexNumber;
const unsigned int second_vertex_number = ((vertex_number + 1) < 3) ? (vertex_number + 1) : 0;
const unsigned int second_vertex_index = device_polygons[polygon_number].ratios[second_vertex_number].vertexNumber;
int x1 = vertexs_2d[first_vertex_index].x;
int y1 = vertexs_2d[first_vertex_index].y;
int x2 = vertexs_2d[second_vertex_index].x;
int y2 = vertexs_2d[second_vertex_index].y;
const bool coordinats_are_correct = (x1 > 0 && x1 < display_width) && (x2 > 0 && x2 < display_width) && (y1 > 0 && y1 < display_height) && (y2 > 0 && y2 < display_height);
if (coordinats_are_correct) {
const int deltaX = abs(x2 - x1);
const int deltaY = abs(y2 - y1);
const int signX = x1 < x2 ? 1 : -1;
const int signY = y1 < y2 ? 1 : -1;
//
int error = deltaX - deltaY;
display_buffer[display_width * y2 + x2].rgb_reserved = 0;
display_buffer[display_width * y2 + x2].rgb_red = 255;
display_buffer[display_width * y2 + x2].rgb_green = 0;
display_buffer[display_width * y2 + x2].rgb_blue = 0;
/*display_buffer[display_width * y1 + x1].rgb_reserved = 0;
display_buffer[display_width * y1 + x1].rgb_red = 255;
display_buffer[display_width * y1 + x1].rgb_green = 0;
display_buffer[display_width * y1 + x1].rgb_blue = 0;*/
while (x1 != x2 || y1 != y2)
{
display_buffer[display_width * y1 + x1].rgb_reserved = 0;
display_buffer[display_width * y1 + x1].rgb_red = 255;
display_buffer[display_width * y1 + x1].rgb_green = 0;
display_buffer[display_width * y1 + x1].rgb_blue = 0;
const int error2 = error * 2;
//
if (error2 > -deltaY)
{
error -= deltaY;
x1 += signX;
}
if (error2 < deltaX)
{
error += deltaX;
y1 += signY;
}
}
}
}
}
}
__global__ void draw(RgbPixel* display_buffer) {
int thread_id = threadIdx.x + blockIdx.x * blockDim.x;
display_buffer[thread_id].rgb_green = 255;
}
CameraInfo GraphicEngine::GetCameraInfo() {
CameraInfo info;
info.camera_pos = (*camera_->GetPosition());
info.dis_proj_plane = camera_->GetDistanceToProjPlane();
info.vector_x = camera_->GetVectorX();
info.vector_y = camera_->GetVectorY();
info.vector_z = camera_->GetDirection();
return info;
}
void GraphicEngine::TestFunction()
{
}
void GraphicEngine::CreateMeshFrame() {
const Vertex3D camera_position = *(camera_->GetPosition());
const Vector3D vector_x = camera_->GetVectorX();
const Vector3D vector_y = camera_->GetVectorY();
const Vector3D vector_z = camera_->GetDirection();
const float distance_to_projection_plane = camera_->GetDistanceToProjPlane();
const Vertex3D* const device_vertexs_3d = device_data_.deviceVertexs;
Vertex2D* const device_vertexs_2d = device_vertexs_2d_;
const Polygon3D* const device_polygons = device_data_.devicePolygons;
Normal3D* device_normals = device_data_.deviceNormals;
RgbColor color;
color.rgb_blue = 20;
color.rgb_green = 255;
color.rgb_red = 0;
const unsigned int number_of_threads = 1024;
unsigned int number_of_blocks = (data_info_.numberOfVertexs + number_of_threads - 1) / number_of_threads;
SetDisplayBufferColor(color);
ProjectVertexs <<<number_of_blocks, number_of_threads>>> (device_vertexs_3d, device_vertexs_2d, data_info_.numberOfVertexs, camera_position, vector_x, vector_y, vector_z, distance_to_projection_plane);
ConvertInDisplayCoordinats <<<number_of_blocks, number_of_threads >>> (device_vertexs_2d, data_info_.numberOfVertexs, display_width_, display_height_);
number_of_blocks = (data_info_.numberOfPolygons * 3 + number_of_threads - 1) / number_of_threads;
DrawLines <<<number_of_blocks, number_of_threads >>> (device_vertexs_2d, device_polygons, device_normals, data_info_.numberOfPolygons, device_display_buffer_, display_width_, display_height_,vector_z);
//cudaMemcpy((void**)host_display_buffer_, device_display_buffer_, display_buffer_size_, cudaMemcpyDeviceToHost);
//const Vertex3D* const host_vertexs_3d = data_info_.allVertexs;
//cudaMemcpy(host_display_buffer_, device_display_buffer_, size_of_display_buffer_, cudaMemcpyDeviceToHost);
//CPUCountingProjectVertexs(host_vertexs_3d, this->host_vertexs_2d_, this->data_info_.numberOfVertexs, camera_position, vector_x, vector_y, vector_z, distance_to_projection_plane);
//CPUCountingConvertInDisplayCoordinats(host_vertexs_2d_, this->data_info_.numberOfVertexs, display_width_, display_height_);
//Polygon3D* host_polygons = this->data_info_.allPolygons;
//CPUCountingDrawLines(host_vertexs_2d_, host_polygons, this->data_info_.numberOfPolygons, this->host_display_buffer_, display_width_, display_height_);
//cudaMemcpy(host_display_buffer_, device_display_buffer_, size_of_display_buffer_, cudaMemcpyDeviceToHost);
}
struct Proj_vertex {
float x;
float y;
float _z;
};
inline __device__ void swap(Proj_vertex& a, Proj_vertex& b) {
Proj_vertex temporary = b;
b = a;
a = temporary;
}
inline __device__ bool InPositiveHalfPlane(const Vertex2D& pixel, const Vertex2D& triangle_vertex, Vector2D& _normal) {
Vector2D pixel_vector;
pixel_vector.x = pixel.x - triangle_vertex.x;
pixel_vector.y = pixel.y - triangle_vertex.y;
Vector2D normal = _normal;
float length_n = sqrt(normal.x * normal.x + normal.y * normal.y);
normal.x /= length_n;
normal.y /= length_n;
float length_p = sqrt(pixel_vector.x * pixel_vector.x + pixel_vector.y * pixel_vector.y);
pixel_vector.x /= length_p;
pixel_vector.y /=length_p;
float scalar = pixel_vector.x * normal.x + pixel_vector.y * normal.y;
if (scalar >= 0.0f) return true;
else
return false;
}
struct InfoForPainting {
Vertex3D* d_vertexs;
Polygon3D* d_polygons;
unsigned int number_of_polygons;
//RgbPixel* d_rgb;
//unsigned int number_of_colors;
unsigned int threads_per_triangle;
unsigned int screen_width;
unsigned int screen_height;
};
__device__ float Interpolate(float y1, float I1, float y2, float I2, float ya) {
return I1 * ((ya - y2)/(y1 - y2)) + I2 * ((y1 - ya)/(y1 - y2));
}
__global__ void DrawPolygons(z_mutex* z_buffer, RgbPixel* display_buffer, Vertex2D* vertexs_2d, InfoForPainting info) {
int thread_index = threadIdx.x + blockDim.x * blockIdx.x;
if (thread_index < info.number_of_polygons * info.threads_per_triangle) {
//if (thread_index % info.threads_per_triangle == 0) {
unsigned int thread_ = (thread_index % info.threads_per_triangle);
Polygon3D polygon = info.d_polygons[thread_index / info.threads_per_triangle];//<---
//printf("%d", thread_index);
Proj_vertex proj_vertexs[3];
for (int i = 0; i < 3; i++)
{
proj_vertexs[i].x = vertexs_2d[polygon.ratios[i].vertexNumber].x;
//printf("x: %2f", proj_vertexs[i].x);
proj_vertexs[i].y = vertexs_2d[polygon.ratios[i].vertexNumber].y;
//printf("y: %2f \n", proj_vertexs[i].y);
proj_vertexs[i]._z = 1.0f / info.d_vertexs[polygon.ratios[i].vertexNumber].z;
}
int min_x = 10000, min_y = 10000, max_x = -1, max_y = -1;
for (int i = 0; i < 3; i++)
{
if (proj_vertexs[i].x < min_x) min_x = floor(proj_vertexs[i].x);
if (proj_vertexs[i].y < min_y) min_y = floor(proj_vertexs[i].y);
if (proj_vertexs[i].x > max_x) max_x = ceil(proj_vertexs[i].x);
if (proj_vertexs[i].y > max_y) max_y = ceil(proj_vertexs[i].y);
}
RgbPixel polygon_color = polygon.color;//info.d_rgb[thread_index ];
//printf("r: %f g: %f b: %f \n", polygon_color.rgb_red, polygon_color.rgb_green, polygon_color.rgb_blue);
//polygon_color.rgb_blue = 0;
//polygon_color.rgb_green = 0;
//polygon_color.rgb_red = 255;
//Enforce a consistent (clockwise) winding for the half-plane tests: swap vertices 1 and 2 if the cross product indicates the opposite orientation
Vertex2D AToB;
AToB.x = proj_vertexs[1].x - proj_vertexs[0].x;
AToB.y = proj_vertexs[1].y - proj_vertexs[0].y;
Vertex2D BToC;
BToC.x = proj_vertexs[2].x - proj_vertexs[1].x;
BToC.y = proj_vertexs[2].y - proj_vertexs[1].y;
float crossz = AToB.x * BToC.y - AToB.y * BToC.x;
if (crossz > 0.0f)
{
Proj_vertex temporary = proj_vertexs[2];
proj_vertexs[2] = proj_vertexs[1];
proj_vertexs[1] = temporary;
}
float length;
Vector2D bot_mid = { proj_vertexs[1].y - proj_vertexs[0].y, -proj_vertexs[1].x + proj_vertexs[0].x };
length = sqrt(bot_mid.x * bot_mid.x + bot_mid.y * bot_mid.y);
bot_mid.x /= length;
bot_mid.y /= length;
Vector2D mid_top = { proj_vertexs[2].y - proj_vertexs[1].y, -proj_vertexs[2].x + proj_vertexs[1].x };
length = sqrt(mid_top.x * mid_top.x + mid_top.y * mid_top.y);
mid_top.x /= length;
mid_top.y /= length;
Vector2D top_bot = { proj_vertexs[0].y - proj_vertexs[2].y, -proj_vertexs[0].x + proj_vertexs[2].x, };
length = sqrt(top_bot.x * top_bot.x + top_bot.y * top_bot.y);
top_bot.x /= length;
top_bot.y /= length;
const Vertex2D bot = { proj_vertexs[0].x, proj_vertexs[0].y };
const Vertex2D mid = { proj_vertexs[1].x, proj_vertexs[1].y };
const Vertex2D top = { proj_vertexs[2].x, proj_vertexs[2].y };
//printf("bot: %2f, %2f, mid: %2f %2f, top: %2f %2f \n", bot.x, bot.y,mid.x, mid.y,top.x, top.y);
//printf("bot_mid: %2f %2f, mid_top: %2f %2f, top_bot: %2f %2f \n", bot_mid.x, bot_mid.y, mid_top.x, mid_top.y, top_bot.x, top_bot.y);
unsigned int delta_y = max_y - min_y;
unsigned int delta_x = max_x - min_x;
unsigned int index = thread_;
for (int i = 0; i < (delta_y * delta_x) / info.threads_per_triangle; i++)
{
unsigned int x = (index % delta_x) + min_x;
unsigned int y = (index / delta_x) + min_y;
Vertex2D pixel;
pixel.x = ((float)x + 0.5f);
pixel.y = ((float)y + 0.5f);
bool PixelInTriangle = InPositiveHalfPlane(pixel, bot, bot_mid) && InPositiveHalfPlane(pixel, mid, mid_top) && InPositiveHalfPlane(pixel, top, top_bot);
if (PixelInTriangle) {
Proj_vertex v[3];
for (int i = 0; i < 3; i++) v[i] = proj_vertexs[i];
//printf("Before %f %f %f \n", p_vertexs[0].y, p_vertexs[1].y, p_vertexs[2].y);
if (v[0].y < v[1].y) swap(v[0], v[1]);
if (v[1].y < v[2].y) swap(v[1], v[2]);
if (v[0].y < v[1].y) swap(v[0], v[1]);
float I1 = v[0]._z, I2 = v[1]._z, I3 = v[2]._z;
float X1 = v[0].x, X2 = v[1].x, X3 = v[2].x;
float Xa, Xb;
float Ia, Ib, Ip;
if (pixel.y > v[1].y) {
Ia = Interpolate(v[0].y, I1, v[1].y, I2, pixel.y);
Xa = Interpolate(v[0].y, v[0].x, v[1].y, v[1].x, pixel.y);
}
else {
Ia = Interpolate(v[2].y, I3, v[1].y, I2, pixel.y);
Xa = Interpolate(v[2].y, v[2].x, v[1].y, v[1].x, pixel.y);
}
Ib = Interpolate(v[0].y, I1, v[2].y, I3, pixel.y);
Xb = Interpolate(v[0].y, v[0].x, v[2].y, v[2].x, pixel.y);
Ip = Interpolate(Xa, Ia, Xb, Ib, pixel.x);
float& pixel_z = Ip;
//printf("after %f %f %f \n", p_vertexs[0].y, p_vertexs[1].y, p_vertexs[2].y);
//while ((z_buffer + 1920 * y + x)->mutex == true) continue;
//(z_buffer + 1920 * y + x)->mutex = true;
//printf("delta_x:%d delta_y:%d x: %d y:%d\n", delta_x, delta_y, x, y);
//printf("r: %d, g:%d, b:%d \n", polygon_color.rgb_red, polygon_color.rgb_green, polygon_color.rgb_blue);
//printf("%f \n", pixel_z);
//if (1.0f / (z_buffer + 1920 * y + x)->z > pixel_z) {
//RgbPixel color = {100, 200, 50, 0};
*(display_buffer + 1920 * y + x) = polygon_color;// color;// polygon_color;
//}
//(z_buffer + 1920 * y + x)->mutex = false;
}
index += info.threads_per_triangle;
}
//for (int y = min_y; y < max_y; y++)
// for (int x = min_x; x < max_x; x++)
// {
// Vertex2D pixel;
// pixel.x = ((float)x + 0.5f);
// pixel.y = ((float)y + 0.5f);
//
// bool PixelInTriangle = InPositiveHalfPlane(pixel, bot, bot_mid) && InPositiveHalfPlane(pixel, mid, mid_top) && InPositiveHalfPlane(pixel, top, top_bot);
// if (PixelInTriangle) {
// Proj_vertex v[3];
// for (int i = 0; i < 3; i++) v[i] = proj_vertexs[i];
// //printf("Before %f %f %f \n", p_vertexs[0].y, p_vertexs[1].y, p_vertexs[2].y);
// if (v[0].y < v[1].y) swap(v[0], v[1]);
// if (v[1].y < v[2].y) swap(v[1], v[2]);
// if (v[0].y < v[1].y) swap(v[0], v[1]);
// float I1 = v[0]._z, I2 = v[1]._z, I3 = v[2]._z;
// float X1 = v[0].x, X2 = v[1].x, X3 = v[2].x;
// float Xa, Xb;
// float Ia, Ib, Ip;
// if (pixel.y > v[1].y) {
// Ia = Interpolate(v[0].y, I1, v[1].y, I2, pixel.y);
// Xa = Interpolate(v[0].y, v[0].x, v[1].y, v[1].x, pixel.y);
// }
// else {
// Ia = Interpolate(v[2].y, I3, v[1].y, I2, pixel.y);
// Xa = Interpolate(v[2].y, v[2].x, v[1].y, v[1].x, pixel.y);
// }
// Ib = Interpolate(v[0].y, I1, v[2].y, I3, pixel.y);
// Xb = Interpolate(v[0].y, v[0].x, v[2].y, v[2].x, pixel.y);
// Ip = Interpolate(Xa, Ia, Xb, Ib, pixel.x);
// float& pixel_z = Ip;
// //printf("after %f %f %f \n", p_vertexs[0].y, p_vertexs[1].y, p_vertexs[2].y);
//
// while ( (z_buffer + 1920 * y + x)->mutex == true ) continue;
// (z_buffer + 1920 * y + x)->mutex = true;
// if (1.0f / (z_buffer + 1920 * y + x)->z > pixel_z) {
// * (display_buffer + 1920 * y + x) = polygon_color;
// }
// (z_buffer + 1920 * y + x)->mutex = false;
// }
//
// }
}
}
void GraphicEngine::CreateFlatFrame() {
const Vertex3D camera_position = *(camera_->GetPosition());
const Vector3D vector_x = camera_->GetVectorX();
const Vector3D vector_y = camera_->GetVectorY();
const Vector3D vector_z = camera_->GetDirection();
const float distance_to_projection_plane = camera_->GetDistanceToProjPlane();
Vertex3D* const device_vertexs_3d = device_data_.deviceVertexs;
Vertex2D* const device_vertexs_2d = device_vertexs_2d_;
Polygon3D* const device_polygons = device_data_.devicePolygons;
RgbColor color;
color.rgb_blue = 255;
color.rgb_green = 255;
color.rgb_red = 255;
cudaMemset(z_mutex_, 0, display_width_ * display_height_ * sizeof(z_mutex));
const unsigned int number_of_threads = 1024;
unsigned int number_of_blocks = (data_info_.numberOfVertexs + number_of_threads - 1) / number_of_threads;
SetDisplayBufferColor(color);
ProjectVertexs <<<number_of_blocks, number_of_threads >>> (device_vertexs_3d, device_vertexs_2d, data_info_.numberOfVertexs, camera_position, vector_x, vector_y, vector_z, distance_to_projection_plane);
ConvertInDisplayCoordinats <<<number_of_blocks, number_of_threads >>> (device_vertexs_2d, data_info_.numberOfVertexs, display_width_, display_height_);
number_of_blocks = (data_info_.numberOfPolygons * threads_per_triangle_ + number_of_threads - 1) / number_of_threads;
InfoForPainting info = { device_vertexs_3d, device_polygons, data_info_.numberOfPolygons, /*(RgbPixel*)device_data_.device_colors, data_info_.numberOfRgbColors,*/ threads_per_triangle_, display_width_, display_height_ };
DrawPolygons <<< number_of_blocks, number_of_threads >>> (z_mutex_, device_display_buffer_, device_vertexs_2d, info);
//system("pause");
}
|
1bc77a06e29c90f0213427a9ea138b70e38b4c86.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "YoloConfigs.h"
#include "YoloLayer.h"
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin(const int cudaThread /*= 512*/):mThreadCount(cudaThread)
{
mClassCount = CLASS_NUM;
mYoloKernel.clear();
mYoloKernel.push_back(yolo1);
mYoloKernel.push_back(yolo2);
mYoloKernel.push_back(yolo3);
mKernelCount = mYoloKernel.size();
}
YoloLayerPlugin::~YoloLayerPlugin()
{
if(mInputBuffer)
CUDA_CHECK(hipHostFree(mInputBuffer));
if(mOutputBuffer)
CUDA_CHECK(hipHostFree(mOutputBuffer));
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(mYoloKernel.data(),d,kernelSize);
d += kernelSize;
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer)
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(d,mYoloKernel.data(),kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize()
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size();
}
int YoloLayerPlugin::initialize()
{
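// Allocate pinned (page-locked) host staging buffers sized for all YOLO heads; they are only used by the CPU fallback path (forwardCpu).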
int totalCount = 0;
for(const auto& yolo : mYoloKernel)
totalCount += (LOCATIONS + 1 + mClassCount) * yolo.width*yolo.height * CHECK_COUNT;
CUDA_CHECK(hipHostMalloc(&mInputBuffer, totalCount * sizeof(float), hipHostMallocDefault));
totalCount = 0;//detection count
for(const auto& yolo : mYoloKernel)
totalCount += yolo.width*yolo.height * CHECK_COUNT;
CUDA_CHECK(hipHostMalloc(&mOutputBuffer, sizeof(float) + totalCount * sizeof(Detection), hipHostMallocDefault));
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalCount = 0;
for(const auto& yolo : mYoloKernel)
totalCount += yolo.width*yolo.height * CHECK_COUNT * sizeof(Detection) / sizeof(float);
return Dims3(totalCount + 1, 1, 1);
}
void YoloLayerPlugin::forwardCpu(const float*const * inputs, float* outputs, hipStream_t stream)
{
auto Logist = [=](float data){
return 1./(1. + exp(-data));
};
CUDA_CHECK(hipStreamSynchronize(stream));
int i = 0;
float* inputData = (float *)mInputBuffer;
for(const auto& yolo : mYoloKernel)
{
int size = (LOCATIONS + 1 + mClassCount) * yolo.width*yolo.height * CHECK_COUNT;
CUDA_CHECK(hipMemcpyAsync(inputData, inputs[i], size * sizeof(float), hipMemcpyDeviceToHost, stream));
inputData += size;
++ i;
}
inputData = (float *)mInputBuffer;
std::vector <Detection> result;
for (const auto& yolo : mYoloKernel)
{
int stride = yolo.width*yolo.height;
for (int j = 0;j < stride ;++j)
{
for (int k = 0;k < CHECK_COUNT; ++k )
{
int beginIdx = (LOCATIONS + 1 + mClassCount)* stride *k + j;
int objIndex = beginIdx + LOCATIONS*stride;
//check obj
float objProb = Logist(inputData[objIndex]);
if(objProb <= IGNORE_THRESH)
continue;
//classes
int classId = -1;
float maxProb = IGNORE_THRESH;
for (int c = 0;c< mClassCount;++c){
float cProb = Logist(inputData[beginIdx + (5 + c) * stride]) * objProb;
if(cProb > maxProb){
maxProb = cProb;
classId = c;
}
}
if(classId >= 0) {
Detection det;
int row = j / yolo.width;
int cols = j % yolo.width;
//Location
det.bbox[0] = (cols + Logist(inputData[beginIdx]))/ yolo.width;
det.bbox[1] = (row + Logist(inputData[beginIdx+stride]))/ yolo.height;
det.bbox[2] = exp(inputData[beginIdx+2*stride]) * yolo.anchors[2*k];
det.bbox[3] = exp(inputData[beginIdx+3*stride]) * yolo.anchors[2*k + 1];
det.classId = classId;
det.prob = maxProb;
//det.objectness = objProb;
result.emplace_back(det);
}
}
}
inputData += (LOCATIONS + 1 + mClassCount) * stride * CHECK_COUNT;
}
int detCount =result.size();
auto data = (float *)mOutputBuffer;
//copy count;
data[0] = (float)detCount;
//std::cout << "detCount"<< detCount << std::endl;
data++;
//copy result
memcpy(data,result.data(),result.size()*sizeof(Detection));
//(count + det result)
CUDA_CHECK(hipMemcpyAsync(outputs, mOutputBuffer, sizeof(float) + result.size()*sizeof(Detection), hipMemcpyHostToDevice, stream));
};
__device__ float Logist(float data){ return 1./(1. + exp(-data)); };
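// One thread per grid cell: each thread evaluates all CHECK_COUNT anchors and appends detections that pass the thresholds after the leading count slot (atomicAdd on output[0]).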
__global__ void CalDetection(const float *input, float *output,int noElements,
int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int stride = yoloWidth*yoloHeight;
for (int k = 0;k < CHECK_COUNT; ++k )
{
int beginIdx = (LOCATIONS + 1 + classes)* stride *k + idx;
int objIndex = beginIdx + LOCATIONS*stride;
//check objectness
float objProb = Logist(input[objIndex]);
if(objProb <= IGNORE_THRESH)
continue;
int row = idx / yoloWidth;
int cols = idx % yoloWidth;
//classes
int classId = -1;
float maxProb = IGNORE_THRESH;
for (int c = 0;c<classes;++c){
float cProb = Logist(input[beginIdx + (5 + c) * stride]) * objProb;
if(cProb > maxProb){
maxProb = cProb;
classId = c;
}
}
if(classId >= 0) {
int resCount = (int)atomicAdd(output,1);
char* data = (char * )output + sizeof(float) + resCount*sizeof(Detection);
Detection* det = (Detection*)(data);
//Location
det->bbox[0] = (cols + Logist(input[beginIdx]))/ yoloWidth;
det->bbox[1] = (row + Logist(input[beginIdx+stride]))/ yoloHeight;
det->bbox[2] = exp(input[beginIdx+2*stride]) * anchors[2*k];
det->bbox[3] = exp(input[beginIdx+3*stride]) * anchors[2*k + 1];
det->classId = classId;
det->prob = maxProb;
}
}
}
void YoloLayerPlugin::forwardGpu(const float *const * inputs,float * output,hipStream_t stream) {
int numElem;
void* devAnchor;
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
CUDA_CHECK(hipMalloc(&devAnchor,AnchorLen));
//first detect count init 0
CUDA_CHECK(hipMemset(output, 0, sizeof(float)));
for (unsigned int i = 0;i< mYoloKernel.size();++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height;
//copy anchor to device
CUDA_CHECK(hipMemcpy(devAnchor,yolo.anchors,AnchorLen,hipMemcpyHostToDevice));
hipLaunchKernelGGL(( CalDetection), dim3((yolo.width*yolo.height + mThreadCount - 1) / mThreadCount), dim3(mThreadCount), 0, 0,
inputs[i],output, numElem, yolo.width, yolo.height, (float *)devAnchor, mClassCount);
}
CUDA_CHECK(hipFree(devAnchor));
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, hipStream_t stream)
{
assert(batchSize == 1);
//GPU
forwardGpu((const float *const *)inputs,(float *)outputs[0],stream);
//CPU
//forwardCpu((const float *const *)inputs,(float *)outputs[0],stream);
return 0;
};
}
| 1bc77a06e29c90f0213427a9ea138b70e38b4c86.cu | #include "YoloConfigs.h"
#include "YoloLayer.h"
using namespace Yolo;
namespace nvinfer1
{
YoloLayerPlugin::YoloLayerPlugin(const int cudaThread /*= 512*/):mThreadCount(cudaThread)
{
mClassCount = CLASS_NUM;
mYoloKernel.clear();
mYoloKernel.push_back(yolo1);
mYoloKernel.push_back(yolo2);
mYoloKernel.push_back(yolo3);
mKernelCount = mYoloKernel.size();
}
YoloLayerPlugin::~YoloLayerPlugin()
{
if(mInputBuffer)
CUDA_CHECK(cudaFreeHost(mInputBuffer));
if(mOutputBuffer)
CUDA_CHECK(cudaFreeHost(mOutputBuffer));
}
// create the plugin at runtime from a byte stream
YoloLayerPlugin::YoloLayerPlugin(const void* data, size_t length)
{
using namespace Tn;
const char *d = reinterpret_cast<const char *>(data), *a = d;
read(d, mClassCount);
read(d, mThreadCount);
read(d, mKernelCount);
mYoloKernel.resize(mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(mYoloKernel.data(),d,kernelSize);
d += kernelSize;
assert(d == a + length);
}
void YoloLayerPlugin::serialize(void* buffer)
{
using namespace Tn;
char* d = static_cast<char*>(buffer), *a = d;
write(d, mClassCount);
write(d, mThreadCount);
write(d, mKernelCount);
auto kernelSize = mKernelCount*sizeof(YoloKernel);
memcpy(d,mYoloKernel.data(),kernelSize);
d += kernelSize;
assert(d == a + getSerializationSize());
}
size_t YoloLayerPlugin::getSerializationSize()
{
return sizeof(mClassCount) + sizeof(mThreadCount) + sizeof(mKernelCount) + sizeof(Yolo::YoloKernel) * mYoloKernel.size();
}
int YoloLayerPlugin::initialize()
{
int totalCount = 0;
for(const auto& yolo : mYoloKernel)
totalCount += (LOCATIONS + 1 + mClassCount) * yolo.width*yolo.height * CHECK_COUNT;
CUDA_CHECK(cudaHostAlloc(&mInputBuffer, totalCount * sizeof(float), cudaHostAllocDefault));
totalCount = 0;//detection count
for(const auto& yolo : mYoloKernel)
totalCount += yolo.width*yolo.height * CHECK_COUNT;
CUDA_CHECK(cudaHostAlloc(&mOutputBuffer, sizeof(float) + totalCount * sizeof(Detection), cudaHostAllocDefault));
return 0;
}
Dims YoloLayerPlugin::getOutputDimensions(int index, const Dims* inputs, int nbInputDims)
{
//output the result to channel
int totalCount = 0;
for(const auto& yolo : mYoloKernel)
totalCount += yolo.width*yolo.height * CHECK_COUNT * sizeof(Detection) / sizeof(float);
return Dims3(totalCount + 1, 1, 1);
}
void YoloLayerPlugin::forwardCpu(const float*const * inputs, float* outputs, cudaStream_t stream)
{
auto Logist = [=](float data){
return 1./(1. + exp(-data));
};
CUDA_CHECK(cudaStreamSynchronize(stream));
int i = 0;
float* inputData = (float *)mInputBuffer;
for(const auto& yolo : mYoloKernel)
{
int size = (LOCATIONS + 1 + mClassCount) * yolo.width*yolo.height * CHECK_COUNT;
CUDA_CHECK(cudaMemcpyAsync(inputData, inputs[i], size * sizeof(float), cudaMemcpyDeviceToHost, stream));
inputData += size;
++ i;
}
inputData = (float *)mInputBuffer;
std::vector <Detection> result;
for (const auto& yolo : mYoloKernel)
{
int stride = yolo.width*yolo.height;
for (int j = 0;j < stride ;++j)
{
for (int k = 0;k < CHECK_COUNT; ++k )
{
int beginIdx = (LOCATIONS + 1 + mClassCount)* stride *k + j;
int objIndex = beginIdx + LOCATIONS*stride;
//check obj
float objProb = Logist(inputData[objIndex]);
if(objProb <= IGNORE_THRESH)
continue;
//classes
int classId = -1;
float maxProb = IGNORE_THRESH;
for (int c = 0;c< mClassCount;++c){
float cProb = Logist(inputData[beginIdx + (5 + c) * stride]) * objProb;
if(cProb > maxProb){
maxProb = cProb;
classId = c;
}
}
if(classId >= 0) {
Detection det;
int row = j / yolo.width;
int cols = j % yolo.width;
//Location
det.bbox[0] = (cols + Logist(inputData[beginIdx]))/ yolo.width;
det.bbox[1] = (row + Logist(inputData[beginIdx+stride]))/ yolo.height;
det.bbox[2] = exp(inputData[beginIdx+2*stride]) * yolo.anchors[2*k];
det.bbox[3] = exp(inputData[beginIdx+3*stride]) * yolo.anchors[2*k + 1];
det.classId = classId;
det.prob = maxProb;
//det.objectness = objProb;
result.emplace_back(det);
}
}
}
inputData += (LOCATIONS + 1 + mClassCount) * stride * CHECK_COUNT;
}
int detCount =result.size();
auto data = (float *)mOutputBuffer;
//copy count;
data[0] = (float)detCount;
//std::cout << "detCount"<< detCount << std::endl;
data++;
//copy result
memcpy(data,result.data(),result.size()*sizeof(Detection));
//(count + det result)
CUDA_CHECK(cudaMemcpyAsync(outputs, mOutputBuffer, sizeof(float) + result.size()*sizeof(Detection), cudaMemcpyHostToDevice, stream));
};
__device__ float Logist(float data){ return 1./(1. + exp(-data)); };
__global__ void CalDetection(const float *input, float *output,int noElements,
int yoloWidth,int yoloHeight,const float anchors[CHECK_COUNT*2],int classes) {
int idx = threadIdx.x + blockDim.x * blockIdx.x;
if (idx >= noElements) return;
int stride = yoloWidth*yoloHeight;
for (int k = 0;k < CHECK_COUNT; ++k )
{
int beginIdx = (LOCATIONS + 1 + classes)* stride *k + idx;
int objIndex = beginIdx + LOCATIONS*stride;
//check objectness
float objProb = Logist(input[objIndex]);
if(objProb <= IGNORE_THRESH)
continue;
int row = idx / yoloWidth;
int cols = idx % yoloWidth;
//classes
int classId = -1;
float maxProb = IGNORE_THRESH;
for (int c = 0;c<classes;++c){
float cProb = Logist(input[beginIdx + (5 + c) * stride]) * objProb;
if(cProb > maxProb){
maxProb = cProb;
classId = c;
}
}
if(classId >= 0) {
int resCount = (int)atomicAdd(output,1);
char* data = (char * )output + sizeof(float) + resCount*sizeof(Detection);
Detection* det = (Detection*)(data);
//Location
det->bbox[0] = (cols + Logist(input[beginIdx]))/ yoloWidth;
det->bbox[1] = (row + Logist(input[beginIdx+stride]))/ yoloHeight;
det->bbox[2] = exp(input[beginIdx+2*stride]) * anchors[2*k];
det->bbox[3] = exp(input[beginIdx+3*stride]) * anchors[2*k + 1];
det->classId = classId;
det->prob = maxProb;
}
}
}
void YoloLayerPlugin::forwardGpu(const float *const * inputs,float * output,cudaStream_t stream) {
int numElem;
void* devAnchor;
size_t AnchorLen = sizeof(float)* CHECK_COUNT*2;
CUDA_CHECK(cudaMalloc(&devAnchor,AnchorLen));
//first detect count init 0
CUDA_CHECK(cudaMemset(output, 0, sizeof(float)));
for (unsigned int i = 0;i< mYoloKernel.size();++i)
{
const auto& yolo = mYoloKernel[i];
numElem = yolo.width*yolo.height;
//copy anchor to device
CUDA_CHECK(cudaMemcpy(devAnchor,yolo.anchors,AnchorLen,cudaMemcpyHostToDevice));
CalDetection<<< (yolo.width*yolo.height + mThreadCount - 1) / mThreadCount, mThreadCount>>>
(inputs[i],output, numElem, yolo.width, yolo.height, (float *)devAnchor, mClassCount);
}
CUDA_CHECK(cudaFree(devAnchor));
}
int YoloLayerPlugin::enqueue(int batchSize, const void*const * inputs, void** outputs, void* workspace, cudaStream_t stream)
{
assert(batchSize == 1);
//GPU
forwardGpu((const float *const *)inputs,(float *)outputs[0],stream);
//CPU
//forwardCpu((const float *const *)inputs,(float *)outputs[0],stream);
return 0;
};
}
|
485efe91ddda37a972566c9a299471e74b564bbb.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
//
// auto-generated by op2.py
//
//user function
__device__ void update_gpu( const float *qold, float *q, float *res, const float *adt,
float *rms) {
float del, adti;
float rmsl = 0.0f;
adti = 1.0f / (*adt);
for (int n = 0; n < 4; n++) {
del = adti * res[n];
q[n] = qold[n] - del;
res[n] = 0.0f;
rmsl += del * del;
}
*rms += rmsl;
}
// CUDA kernel function
__global__ void op_cuda_update(
const float *__restrict arg0,
float *arg1,
float *arg2,
const float *__restrict arg3,
float *arg4,
int set_size ) {
float arg4_l[1];
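// Thread-local partial sum for the global rms reduction; combined across the block into arg4 below.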
for ( int d=0; d<1; d++ ){
arg4_l[d]=ZERO_float;
}
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
update_gpu(arg0+n*4,
arg1+n*4,
arg2+n*4,
arg3+n*1,
arg4_l);
}
//global reductions
for ( int d=0; d<1; d++ ){
op_reduction<OP_INC>(&arg4[d+blockIdx.x*1],arg4_l[d]);
}
}
//host stub function
void op_par_loop_update(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4){
float*arg4h = (float *)arg4.data;
int nargs = 5;
op_arg args[5];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(4);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[4].name = name;
OP_kernels[4].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: update");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_4
int nthread = OP_BLOCK_SIZE_4;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
//transfer global reduction data to GPU
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
reduct_size = MAX(reduct_size,sizeof(float));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg4.data = OP_reduct_h + reduct_bytes;
arg4.data_d = OP_reduct_d + reduct_bytes;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
((float *)arg4.data)[d+b*1] = ZERO_float;
}
}
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
mvReductArraysToDevice(reduct_bytes);
int nshared = reduct_size*nthread;
hipLaunchKernelGGL(( op_cuda_update), dim3(nblocks),dim3(nthread),nshared, 0,
(float *) arg0.data_d,
(float *) arg1.data_d,
(float *) arg2.data_d,
(float *) arg3.data_d,
(float *) arg4.data_d,
set->size );
//transfer global reduction data back to CPU
mvReductArraysToHost(reduct_bytes);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg4h[d] = arg4h[d] + ((float *)arg4.data)[d+b*1];
}
}
arg4.data = (char *)arg4h;
op_mpi_reduce(&arg4,arg4h);
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(hipDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[4].time += wall_t2 - wall_t1;
OP_kernels[4].transfer += (float)set->size * arg0.size;
OP_kernels[4].transfer += (float)set->size * arg1.size * 2.0f;
OP_kernels[4].transfer += (float)set->size * arg2.size * 2.0f;
OP_kernels[4].transfer += (float)set->size * arg3.size;
}
| 485efe91ddda37a972566c9a299471e74b564bbb.cu | //
// auto-generated by op2.py
//
//user function
__device__ void update_gpu( const float *qold, float *q, float *res, const float *adt,
float *rms) {
float del, adti;
float rmsl = 0.0f;
adti = 1.0f / (*adt);
for (int n = 0; n < 4; n++) {
del = adti * res[n];
q[n] = qold[n] - del;
res[n] = 0.0f;
rmsl += del * del;
}
*rms += rmsl;
}
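// update_gpu advances the four solution variables of one cell by an explicit
// step scaled by 1/adt, zeroes the residual for the next iteration, and
// accumulates the squared update into the caller-provided RMS contribution.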
// CUDA kernel function
__global__ void op_cuda_update(
const float *__restrict arg0,
float *arg1,
float *arg2,
const float *__restrict arg3,
float *arg4,
int set_size ) {
float arg4_l[1];
for ( int d=0; d<1; d++ ){
arg4_l[d]=ZERO_float;
}
//process set elements
for ( int n=threadIdx.x+blockIdx.x*blockDim.x; n<set_size; n+=blockDim.x*gridDim.x ){
//user-supplied kernel call
update_gpu(arg0+n*4,
arg1+n*4,
arg2+n*4,
arg3+n*1,
arg4_l);
}
//global reductions
for ( int d=0; d<1; d++ ){
op_reduction<OP_INC>(&arg4[d+blockIdx.x*1],arg4_l[d]);
}
}
//host stub function
void op_par_loop_update(char const *name, op_set set,
op_arg arg0,
op_arg arg1,
op_arg arg2,
op_arg arg3,
op_arg arg4){
float*arg4h = (float *)arg4.data;
int nargs = 5;
op_arg args[5];
args[0] = arg0;
args[1] = arg1;
args[2] = arg2;
args[3] = arg3;
args[4] = arg4;
// initialise timers
double cpu_t1, cpu_t2, wall_t1, wall_t2;
op_timing_realloc(4);
op_timers_core(&cpu_t1, &wall_t1);
OP_kernels[4].name = name;
OP_kernels[4].count += 1;
if (OP_diags>2) {
printf(" kernel routine w/o indirection: update");
}
op_mpi_halo_exchanges_cuda(set, nargs, args);
if (set->size > 0) {
//set CUDA execution parameters
#ifdef OP_BLOCK_SIZE_4
int nthread = OP_BLOCK_SIZE_4;
#else
int nthread = OP_block_size;
// int nthread = 128;
#endif
int nblocks = 200;
//transfer global reduction data to GPU
int maxblocks = nblocks;
int reduct_bytes = 0;
int reduct_size = 0;
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
reduct_size = MAX(reduct_size,sizeof(float));
reallocReductArrays(reduct_bytes);
reduct_bytes = 0;
arg4.data = OP_reduct_h + reduct_bytes;
arg4.data_d = OP_reduct_d + reduct_bytes;
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
((float *)arg4.data)[d+b*1] = ZERO_float;
}
}
reduct_bytes += ROUND_UP(maxblocks*1*sizeof(float));
mvReductArraysToDevice(reduct_bytes);
int nshared = reduct_size*nthread;
op_cuda_update<<<nblocks,nthread,nshared>>>(
(float *) arg0.data_d,
(float *) arg1.data_d,
(float *) arg2.data_d,
(float *) arg3.data_d,
(float *) arg4.data_d,
set->size );
//transfer global reduction data back to CPU
mvReductArraysToHost(reduct_bytes);
for ( int b=0; b<maxblocks; b++ ){
for ( int d=0; d<1; d++ ){
arg4h[d] = arg4h[d] + ((float *)arg4.data)[d+b*1];
}
}
arg4.data = (char *)arg4h;
op_mpi_reduce(&arg4,arg4h);
}
op_mpi_set_dirtybit_cuda(nargs, args);
cutilSafeCall(cudaDeviceSynchronize());
//update kernel record
op_timers_core(&cpu_t2, &wall_t2);
OP_kernels[4].time += wall_t2 - wall_t1;
OP_kernels[4].transfer += (float)set->size * arg0.size;
OP_kernels[4].transfer += (float)set->size * arg1.size * 2.0f;
OP_kernels[4].transfer += (float)set->size * arg2.size * 2.0f;
OP_kernels[4].transfer += (float)set->size * arg3.size;
}
|
57736cf6b5a933063fd8d3410f612e40c5f522e2.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <unittest/unittest.h>
#include <thrust/set_operations.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/extrema.h>
#include <thrust/iterator/discard_iterator.h>
#ifdef THRUST_TEST_DEVICE_SIDE
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4>
__global__
void set_intersection_kernel(ExecutionPolicy exec, Iterator1 first1, Iterator1 last1,
Iterator2 first2, Iterator2 last2,
Iterator3 result1,
Iterator4 result2)
{
*result2 = thrust::set_intersection(exec, first1, last1, first2, last2, result1);
}
template<typename ExecutionPolicy>
void TestSetIntersectionDevice(ExecutionPolicy exec)
{
typedef thrust::device_vector<int> Vector;
typedef Vector::iterator Iterator;
Vector a(3), b(4);
a[0] = 0; a[1] = 2; a[2] = 4;
b[0] = 0; b[1] = 3; b[2] = 3; b[3] = 4;
Vector ref(2);
ref[0] = 0; ref[1] = 4;
Vector result(2);
thrust::device_vector<Iterator> end_vec(1);
hipLaunchKernelGGL(( set_intersection_kernel), dim3(1),dim3(1), 0, 0, exec, a.begin(), a.end(), b.begin(), b.end(), result.begin(), end_vec.begin());
hipError_t const err = hipDeviceSynchronize();
ASSERT_EQUAL(hipSuccess, err);
Iterator end = end_vec.front();
ASSERT_EQUAL_QUIET(result.end(), end);
ASSERT_EQUAL(ref, result);
}
void TestSetIntersectionDeviceSeq()
{
TestSetIntersectionDevice(thrust::seq);
}
DECLARE_UNITTEST(TestSetIntersectionDeviceSeq);
void TestSetIntersectionDeviceDevice()
{
TestSetIntersectionDevice(thrust::device);
}
DECLARE_UNITTEST(TestSetIntersectionDeviceDevice);
void TestSetIntersectionDeviceNoSync()
{
TestSetIntersectionDevice(thrust::hip::par_nosync);
}
DECLARE_UNITTEST(TestSetIntersectionDeviceNoSync);
#endif
template<typename ExecutionPolicy>
void TestSetIntersectionCudaStreams(ExecutionPolicy policy)
{
typedef thrust::device_vector<int> Vector;
typedef Vector::iterator Iterator;
Vector a(3), b(4);
a[0] = 0; a[1] = 2; a[2] = 4;
b[0] = 0; b[1] = 3; b[2] = 3; b[3] = 4;
Vector ref(2);
ref[0] = 0; ref[1] = 4;
Vector result(2);
hipStream_t s;
hipStreamCreate(&s);
auto streampolicy = policy.on(s);
Iterator end = thrust::set_intersection(streampolicy,
a.begin(), a.end(),
b.begin(), b.end(),
result.begin());
hipStreamSynchronize(s);
ASSERT_EQUAL_QUIET(result.end(), end);
ASSERT_EQUAL(ref, result);
hipStreamDestroy(s);
}
void TestSetIntersectionCudaStreamsSync()
{
TestSetIntersectionCudaStreams(thrust::hip::par);
}
DECLARE_UNITTEST(TestSetIntersectionCudaStreamsSync);
void TestSetIntersectionCudaStreamsNoSync()
{
TestSetIntersectionCudaStreams(thrust::hip::par_nosync);
}
DECLARE_UNITTEST(TestSetIntersectionCudaStreamsNoSync);
| 57736cf6b5a933063fd8d3410f612e40c5f522e2.cu | #include <unittest/unittest.h>
#include <thrust/set_operations.h>
#include <thrust/functional.h>
#include <thrust/sort.h>
#include <thrust/extrema.h>
#include <thrust/iterator/discard_iterator.h>
#ifdef THRUST_TEST_DEVICE_SIDE
template<typename ExecutionPolicy, typename Iterator1, typename Iterator2, typename Iterator3, typename Iterator4>
__global__
void set_intersection_kernel(ExecutionPolicy exec, Iterator1 first1, Iterator1 last1,
Iterator2 first2, Iterator2 last2,
Iterator3 result1,
Iterator4 result2)
{
*result2 = thrust::set_intersection(exec, first1, last1, first2, last2, result1);
}
template<typename ExecutionPolicy>
void TestSetIntersectionDevice(ExecutionPolicy exec)
{
typedef thrust::device_vector<int> Vector;
typedef Vector::iterator Iterator;
Vector a(3), b(4);
a[0] = 0; a[1] = 2; a[2] = 4;
b[0] = 0; b[1] = 3; b[2] = 3; b[3] = 4;
Vector ref(2);
ref[0] = 0; ref[1] = 4;
Vector result(2);
thrust::device_vector<Iterator> end_vec(1);
set_intersection_kernel<<<1,1>>>(exec, a.begin(), a.end(), b.begin(), b.end(), result.begin(), end_vec.begin());
cudaError_t const err = cudaDeviceSynchronize();
ASSERT_EQUAL(cudaSuccess, err);
Iterator end = end_vec.front();
ASSERT_EQUAL_QUIET(result.end(), end);
ASSERT_EQUAL(ref, result);
}
void TestSetIntersectionDeviceSeq()
{
TestSetIntersectionDevice(thrust::seq);
}
DECLARE_UNITTEST(TestSetIntersectionDeviceSeq);
void TestSetIntersectionDeviceDevice()
{
TestSetIntersectionDevice(thrust::device);
}
DECLARE_UNITTEST(TestSetIntersectionDeviceDevice);
void TestSetIntersectionDeviceNoSync()
{
TestSetIntersectionDevice(thrust::cuda::par_nosync);
}
DECLARE_UNITTEST(TestSetIntersectionDeviceNoSync);
#endif
template<typename ExecutionPolicy>
void TestSetIntersectionCudaStreams(ExecutionPolicy policy)
{
typedef thrust::device_vector<int> Vector;
typedef Vector::iterator Iterator;
Vector a(3), b(4);
a[0] = 0; a[1] = 2; a[2] = 4;
b[0] = 0; b[1] = 3; b[2] = 3; b[3] = 4;
Vector ref(2);
ref[0] = 0; ref[1] = 4;
Vector result(2);
cudaStream_t s;
cudaStreamCreate(&s);
auto streampolicy = policy.on(s);
Iterator end = thrust::set_intersection(streampolicy,
a.begin(), a.end(),
b.begin(), b.end(),
result.begin());
cudaStreamSynchronize(s);
ASSERT_EQUAL_QUIET(result.end(), end);
ASSERT_EQUAL(ref, result);
cudaStreamDestroy(s);
}
void TestSetIntersectionCudaStreamsSync()
{
TestSetIntersectionCudaStreams(thrust::cuda::par);
}
DECLARE_UNITTEST(TestSetIntersectionCudaStreamsSync);
void TestSetIntersectionCudaStreamsNoSync()
{
TestSetIntersectionCudaStreams(thrust::cuda::par_nosync);
}
DECLARE_UNITTEST(TestSetIntersectionCudaStreamsNoSync);
|
d3020c4d1889aacb07d9f7717a3e1895eb9f638d.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "uplo_exp2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int unit = 1;
const int bottom = 1;
const REAL *a = NULL;
hipMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
REAL *b = NULL;
hipMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((uplo_exp2), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((uplo_exp2), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((uplo_exp2), dim3(gridBlock),dim3(threadBlock), 0, 0, sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | d3020c4d1889aacb07d9f7717a3e1895eb9f638d.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "uplo_exp2.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
const int sd = 1;
const int unit = 1;
const int bottom = 1;
const REAL *a = NULL;
cudaMalloc(&a, XSIZE*YSIZE);
const int offset_a = 1;
const int ld_a = 1;
REAL *b = NULL;
cudaMalloc(&b, XSIZE*YSIZE);
const int offset_b = 1;
const int ld_b = 1;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
uplo_exp2<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
uplo_exp2<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
uplo_exp2<<<gridBlock,threadBlock>>>(sd,unit,bottom,a,offset_a,ld_a,b,offset_b,ld_b);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
932593afd95e5974fa81f16681512bb9a697624a.hip | // !!! This is a file automatically generated by hipify!!!
#include "nbodysim.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>
#include <stdio.h>
/// GPU leapfrog integration (both steps are implemented in the same function)
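/// The same kernel is intended to serve both half-steps: with
/// before_accel_update=true it applies the first half-kick plus the position
/// drift; with false it applies only the second half-kick once the
/// accelerations have been recomputed.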
__global__ void leapfrog_integrate(simdata_t *d_sdata, float *d_acceleration,
float dt, bool before_accel_update) {
int idx = threadIdx.x + blockIdx.x * 1024;
if (idx >= d_sdata->nparticles)
return;
float *d_pos = simdata_pos_ptr(d_sdata, idx);
float *d_vel = simdata_vel_ptr(d_sdata, idx);
float *d_accel = d_acceleration + d_sdata->posdim * idx;
for (int i = 0; i < d_sdata->posdim; i++) {
d_vel[i] += d_accel[i] * dt * 0.5;
if (before_accel_update) {
d_pos[i] += d_vel[i] * dt;
}
}
}
/// Euler integration
__global__ void euler_integrate(simdata_t *d_sdata, float *d_acceleration,
float dt) {
int idx = threadIdx.x + blockIdx.x * 1024;
if (idx >= d_sdata->nparticles)
return;
float *d_pos = simdata_pos_ptr(d_sdata, idx);
float *d_vel = simdata_vel_ptr(d_sdata, idx);
float *d_accel = d_acceleration + d_sdata->posdim * idx;
for (int i = 0; i < d_sdata->posdim; i++) {
d_pos[i] += d_vel[i] * dt + 0.5 * d_accel[i] * dt * dt;
d_vel[i] += d_accel[i] * dt;
}
}
| 932593afd95e5974fa81f16681512bb9a697624a.cu | #include "nbodysim.h"
#include <cuda.h>
#include <cuda_runtime_api.h>
#include <stdio.h>
/// GPU leapfrog integration (both steps are implemented in the same function)
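/// The same kernel is intended to serve both half-steps: with
/// before_accel_update=true it applies the first half-kick plus the position
/// drift; with false it applies only the second half-kick once the
/// accelerations have been recomputed.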
__global__ void leapfrog_integrate(simdata_t *d_sdata, float *d_acceleration,
float dt, bool before_accel_update) {
int idx = threadIdx.x + blockIdx.x * 1024;
if (idx >= d_sdata->nparticles)
return;
float *d_pos = simdata_pos_ptr(d_sdata, idx);
float *d_vel = simdata_vel_ptr(d_sdata, idx);
float *d_accel = d_acceleration + d_sdata->posdim * idx;
for (int i = 0; i < d_sdata->posdim; i++) {
d_vel[i] += d_accel[i] * dt * 0.5;
if (before_accel_update) {
d_pos[i] += d_vel[i] * dt;
}
}
}
/// Euler integration
__global__ void euler_integrate(simdata_t *d_sdata, float *d_acceleration,
float dt) {
int idx = threadIdx.x + blockIdx.x * 1024;
if (idx >= d_sdata->nparticles)
return;
float *d_pos = simdata_pos_ptr(d_sdata, idx);
float *d_vel = simdata_vel_ptr(d_sdata, idx);
float *d_accel = d_acceleration + d_sdata->posdim * idx;
for (int i = 0; i < d_sdata->posdim; i++) {
d_pos[i] += d_vel[i] * dt + 0.5 * d_accel[i] * dt * dt;
d_vel[i] += d_accel[i] * dt;
}
}
|
8bdb5d3fb12ba27054fa102d63f1d28763aabba1.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#define DATA_SIZE 10000000
#define BLOCKS_COUNT 1
#define THREADS_COUNT 64
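// findMean: each thread averages one contiguous chunk of `dataForBlock` input
// values and writes the mean to results[threadIndex].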
__global__ void findMean(unsigned int dataForBlock, float *inputData, float *results)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
float result = 0;
for (int i = 0; i < dataForBlock; i++)
{
result += inputData[index * dataForBlock + i];
}
result /= dataForBlock;
results[index] = result;
}
void processWithGPU(float *blocks, float *results, unsigned int blockSize, unsigned int blocksCount)
{
unsigned int realDataCount = blockSize * blocksCount;
hipSetDevice(0);
float *deviceInputData, *deviceResults;
hipMalloc((void **)&deviceInputData, realDataCount * sizeof(float));
hipMalloc((void **)&deviceResults, realDataCount * sizeof(float));
hipMemcpy(deviceInputData, blocks, realDataCount * sizeof(float), hipMemcpyHostToDevice);
hipLaunchKernelGGL(( findMean), dim3(1), dim3(blocksCount), 0, 0, blockSize, deviceInputData, deviceResults);
hipMemcpy((void *)results, deviceResults, blocksCount * sizeof(float), hipMemcpyDeviceToHost);
hipFree(deviceInputData);
hipFree(deviceResults);
}
| 8bdb5d3fb12ba27054fa102d63f1d28763aabba1.cu | #define DATA_SIZE 10000000
#define BLOCKS_COUNT 1
#define THREADS_COUNT 64
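// findMean: each thread averages one contiguous chunk of `dataForBlock` input
// values and writes the mean to results[threadIndex].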
__global__ void findMean(unsigned int dataForBlock, float *inputData, float *results)
{
int index = blockIdx.x * blockDim.x + threadIdx.x;
float result = 0;
for (int i = 0; i < dataForBlock; i++)
{
result += inputData[index * dataForBlock + i];
}
result /= dataForBlock;
results[index] = result;
}
void processWithGPU(float *blocks, float *results, unsigned int blockSize, unsigned int blocksCount)
{
unsigned int realDataCount = blockSize * blocksCount;
cudaSetDevice(0);
float *deviceInputData, *deviceResults;
cudaMalloc((void **)&deviceInputData, realDataCount * sizeof(float));
cudaMalloc((void **)&deviceResults, realDataCount * sizeof(float));
cudaMemcpy(deviceInputData, blocks, realDataCount * sizeof(float), cudaMemcpyHostToDevice);
findMean<<<1, blocksCount>>>(blockSize, deviceInputData, deviceResults);
cudaMemcpy((void *)results, deviceResults, blocksCount * sizeof(float), cudaMemcpyDeviceToHost);
cudaFree(deviceInputData);
cudaFree(deviceResults);
}
|
4ba4d234dbf36d69a50364d380d948000f64568b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file density_rg.cu
*
* \brief CUDA kernel to update density on the regular FFT grid.
*/
#include "../SDDK/GPU/cuda_common.hpp"
#include "../SDDK/GPU/acc_runtime.hpp"
__global__ void update_density_rg_1_complex_gpu_kernel(int size__,
acc_complex_double_t const* psi_rg__,
double wt__,
double* density_rg__)
{
int ir = blockIdx.x * blockDim.x + threadIdx.x;
if (ir < size__) {
acc_complex_double_t z = psi_rg__[ir];
density_rg__[ir] += (z.x * z.x + z.y * z.y) * wt__;
}
}
/* Update one density component from one complex wave-function */
extern "C" void update_density_rg_1_complex_gpu(int size__,
acc_complex_double_t const* psi_rg__,
double wt__,
double* density_rg__)
{
//CUDA_timer t("update_density_rg_1_gpu");
dim3 grid_t(64);
dim3 grid_b(num_blocks(size__, grid_t.x));
accLaunchKernel((update_density_rg_1_complex_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
size__,
psi_rg__,
wt__,
density_rg__
);
}
__global__ void update_density_rg_1_real_gpu_kernel(int size__,
double const* psi_rg__,
double wt__,
double* density_rg__)
{
int ir = blockIdx.x * blockDim.x + threadIdx.x;
if (ir < size__) {
double p = psi_rg__[ir];
density_rg__[ir] += p * p * wt__;
}
}
/* Update one density component from one real wave-function */
extern "C" void update_density_rg_1_real_gpu(int size__,
double const* psi_rg__,
double wt__,
double* density_rg__)
{
//CUDA_timer t("update_density_rg_1_gpu");
dim3 grid_t(64);
dim3 grid_b(num_blocks(size__, grid_t.x));
accLaunchKernel((update_density_rg_1_real_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
size__,
psi_rg__,
wt__,
density_rg__
);
}
__global__ void update_density_rg_2_gpu_kernel(int size__,
acc_complex_double_t const* psi_up_rg__,
acc_complex_double_t const* psi_dn_rg__,
double wt__,
double* density_x_rg__,
double* density_y_rg__)
{
int ir = blockIdx.x * blockDim.x + threadIdx.x;
if (ir < size__) {
acc_complex_double_t z = accCmul(psi_up_rg__[ir], accConj(psi_dn_rg__[ir]));
density_x_rg__[ir] += 2 * z.x * wt__;
density_y_rg__[ir] -= 2 * z.y * wt__;
}
}
/* Update off-diagonal density component in non-collinear case */
extern "C" void update_density_rg_2_gpu(int size__,
acc_complex_double_t const* psi_up_rg__,
acc_complex_double_t const* psi_dn_rg__,
double wt__,
double* density_x_rg__,
double* density_y_rg__)
{
//CUDA_timer t("update_density_rg_1_gpu");
dim3 grid_t(64);
dim3 grid_b(num_blocks(size__, grid_t.x));
accLaunchKernel((update_density_rg_2_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
size__,
psi_up_rg__,
psi_dn_rg__,
wt__,
density_x_rg__,
density_y_rg__
);
}
| 4ba4d234dbf36d69a50364d380d948000f64568b.cu | // Copyright (c) 2013-2018 Anton Kozhevnikov, Thomas Schulthess
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without modification, are permitted provided that
// the following conditions are met:
//
// 1. Redistributions of source code must retain the above copyright notice, this list of conditions and the
// following disclaimer.
// 2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions
// and the following disclaimer in the documentation and/or other materials provided with the distribution.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED
// WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
// PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
// ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
// PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
// CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
// OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
/** \file density_rg.cu
*
* \brief CUDA kernel to update density on the regular FFT grid.
*/
#include "../SDDK/GPU/cuda_common.hpp"
#include "../SDDK/GPU/acc_runtime.hpp"
__global__ void update_density_rg_1_complex_gpu_kernel(int size__,
acc_complex_double_t const* psi_rg__,
double wt__,
double* density_rg__)
{
int ir = blockIdx.x * blockDim.x + threadIdx.x;
if (ir < size__) {
acc_complex_double_t z = psi_rg__[ir];
density_rg__[ir] += (z.x * z.x + z.y * z.y) * wt__;
}
}
/* Update one density component from one complex wave-function */
extern "C" void update_density_rg_1_complex_gpu(int size__,
acc_complex_double_t const* psi_rg__,
double wt__,
double* density_rg__)
{
//CUDA_timer t("update_density_rg_1_gpu");
dim3 grid_t(64);
dim3 grid_b(num_blocks(size__, grid_t.x));
accLaunchKernel((update_density_rg_1_complex_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
size__,
psi_rg__,
wt__,
density_rg__
);
}
__global__ void update_density_rg_1_real_gpu_kernel(int size__,
double const* psi_rg__,
double wt__,
double* density_rg__)
{
int ir = blockIdx.x * blockDim.x + threadIdx.x;
if (ir < size__) {
double p = psi_rg__[ir];
density_rg__[ir] += p * p * wt__;
}
}
/* Update one density component from one real wave-function */
extern "C" void update_density_rg_1_real_gpu(int size__,
double const* psi_rg__,
double wt__,
double* density_rg__)
{
//CUDA_timer t("update_density_rg_1_gpu");
dim3 grid_t(64);
dim3 grid_b(num_blocks(size__, grid_t.x));
accLaunchKernel((update_density_rg_1_real_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
size__,
psi_rg__,
wt__,
density_rg__
);
}
__global__ void update_density_rg_2_gpu_kernel(int size__,
acc_complex_double_t const* psi_up_rg__,
acc_complex_double_t const* psi_dn_rg__,
double wt__,
double* density_x_rg__,
double* density_y_rg__)
{
int ir = blockIdx.x * blockDim.x + threadIdx.x;
if (ir < size__) {
acc_complex_double_t z = accCmul(psi_up_rg__[ir], accConj(psi_dn_rg__[ir]));
density_x_rg__[ir] += 2 * z.x * wt__;
density_y_rg__[ir] -= 2 * z.y * wt__;
}
}
/* Update off-diagonal density component in non-collinear case */
extern "C" void update_density_rg_2_gpu(int size__,
acc_complex_double_t const* psi_up_rg__,
acc_complex_double_t const* psi_dn_rg__,
double wt__,
double* density_x_rg__,
double* density_y_rg__)
{
//CUDA_timer t("update_density_rg_1_gpu");
dim3 grid_t(64);
dim3 grid_b(num_blocks(size__, grid_t.x));
accLaunchKernel((update_density_rg_2_gpu_kernel), dim3(grid_b), dim3(grid_t), 0, 0,
size__,
psi_up_rg__,
psi_dn_rg__,
wt__,
density_x_rg__,
density_y_rg__
);
}
|
49b47ff4e33310178f3c1712f33efc58ef09ea25.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void convolution1D(const int *d_arr, const int *d_conv, int *d_result, int N, int M) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int globalId = i*N + j;
if(globalId < N) {
int convSum = 0, cnum = 0, k = M/2;
for(int i=-k; i<=k; i++) {
if(globalId + i >= 0 && globalId + i < N && cnum < M) {
convSum += d_arr[globalId + i]*d_conv[cnum];
}
cnum++;
}
d_result[globalId] = convSum;
}
} | 49b47ff4e33310178f3c1712f33efc58ef09ea25.cu | #include "includes.h"
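// convolution1D: each thread forms one output element as the dot product of an
// M-wide window of the input centred on its global index with d_conv; window
// positions falling outside [0, N) contribute zero (implicit zero padding).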
__global__ void convolution1D(const int *d_arr, const int *d_conv, int *d_result, int N, int M) {
int i = blockIdx.x*blockDim.x + threadIdx.x;
int j = blockIdx.y*blockDim.y + threadIdx.y;
int globalId = i*N + j;
if(globalId < N) {
int convSum = 0, cnum = 0, k = M/2;
for(int i=-k; i<=k; i++) {
if(globalId + i >= 0 && globalId + i < N && cnum < M) {
convSum += d_arr[globalId + i]*d_conv[cnum];
}
cnum++;
}
d_result[globalId] = convSum;
}
} |
ac76a558da6cda35638dd748db2aea14fe3abd7b.hip | // !!! This is a file automatically generated by hipify!!!
#include <cassert>
#include <cfloat>
#include <hip/hip_runtime_api.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
//#define mm_BLOCK_SIZE_x 8
#define mm_BLOCK_SIZE 16
//#define mm_SUPER_BLOCKS_PER_SM 4
//int mm_SUPER_BLOCKS_PER_SM = 4;
#define iSizeMultiple 4 //must be multiples of 15
#define WA (4 * mm_BLOCK_SIZE) // Matrix A width
#define HA (4 * mm_BLOCK_SIZE) // Matrix A height
//#define WB (mm_SUPER_BLOCKS_PER_SM * mm_BLOCK_SIZE) // Matrix B width
#define WB (60 * mm_BLOCK_SIZE) // Matrix B width
#define HB WA // Matrix B height
#define WC WB // Matrix C width
#define HC HA // Matrix C height
#define mm_GRID_X (WC*iSizeMultiple/mm_BLOCK_SIZE)
#define mm_GRID_Y (HC*iSizeMultiple/mm_BLOCK_SIZE)
#define mm_NBLOCKS (mm_GRID_X*mm_GRID_Y)
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void
computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
for (unsigned int j = 0; j < wB; ++j) {
double sum = 0;
for (unsigned int k = 0; k < wA; ++k) {
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
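// mm_kernel: classic block-tiled matrix multiply skeleton. The shared-memory
// staging of the A/B tiles is commented out below, so each thread currently
// reads its operands directly from global memory while keeping the tiled loop
// structure intact.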
__global__ void
mm_kernel( float* C, float* A, float* B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * mm_BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = mm_BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = mm_BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = mm_BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
// __shared__ float As[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
// __shared__ float Bs[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
// AS(ty, tx) = A[a + wA * ty + tx];
// BS(ty, tx) = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < mm_BLOCK_SIZE; ++k)
Csub += A[a+wA * ty +k]*B[b + wB * k + tx];//AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * mm_BLOCK_SIZE * by + mm_BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
//if (threadIdx.x==0&&threadIdx.y==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
// hipSetDevice(1);
srand(2013);
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
uiWA = WA * iSizeMultiple;
uiHA = HA * iSizeMultiple;
uiWB = WB * iSizeMultiple;
uiHB = HB * iSizeMultiple;
uiWC = WC * iSizeMultiple;
uiHC = HC * iSizeMultiple;
// allocate host memory for matrices A and B
unsigned int size_A = uiWA * uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = uiWB * uiHB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
//printf("size A = %d bytes,size B=%d bytes\n",mem_size_A,mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A, *d_B, *d_C;
unsigned int size_C = uiWC * uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
printf("size A = %d bytes,size B=%d bytes,size C=%d bytes\n",mem_size_A,mem_size_B,mem_size_C);
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
float* h_CUBLAS = (float*) malloc(mem_size_C);
checkCudaErrors(hipMalloc((void**) &d_A, mem_size_A));
checkCudaErrors(hipMalloc((void**) &d_B, mem_size_B));
// copy host memory to device
checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice) );
checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice) );
checkCudaErrors(hipMalloc((void**) &d_C, mem_size_C));
hipEvent_t kernel_start, kernel_stop;
hipEventCreate(&kernel_start);
hipEventCreate(&kernel_stop);
float kernel_time = 0.0f;
hipEventRecord(kernel_start, 0);
// setup execution parameters
dim3 mm_grid(mm_GRID_X, mm_GRID_Y);
dim3 mm_block(mm_BLOCK_SIZE, mm_BLOCK_SIZE);
// int mm_grid=mm_GRID_X*mm_GRID_Y;
hipLaunchKernelGGL(( mm_kernel), dim3(mm_grid), dim3(mm_block), 0, 0, d_C, d_A, d_B, uiWA, uiWB);
hipDeviceSynchronize();
hipEventRecord(kernel_stop, 0);
hipEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
// copy result from device to host
checkCudaErrors(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost) );
// compute reference solution
float* reference = (float*)malloc(mem_size_C);
computeGold(reference, h_A, h_B, uiHA, uiWA, uiWB);
// check result (matrixMul)
bool resCUDA = sdkCompareL2fe(reference, h_C, size_C, 1.0e-6f);
printf("CUDA matrixMul compares %s\n\n", (true == resCUDA) ? "passed" : "FAIL");
// ofstream f1("mm_correct.txt");
// for(int i=0; i<size_C; ++i)
// f1 << reference[i] << endl;
// f1.close();
//
// ofstream f2("mm_gpu.txt");
// for(int i=0; i<size_C; ++i)
// f2 << h_C[i] << endl;
// f2.close();
// clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
checkCudaErrors(hipFree(d_A));
checkCudaErrors(hipFree(d_B));
checkCudaErrors(hipFree(d_C));
return 0;
}
| ac76a558da6cda35638dd748db2aea14fe3abd7b.cu |
#include <cassert>
#include <cfloat>
#include <cuda_runtime_api.h>
#include <cuda.h>
#include <iostream>
#include <stdio.h>
#include <list>
#include <map>
#include <math.h>
#include <stdlib.h>
#include <vector>
#include <set>
#include <algorithm>
#include <iterator>
#include <fstream>
#include "../include/common.h"
#define K 1
using namespace std;
//#define mm_BLOCK_SIZE_x 8
#define mm_BLOCK_SIZE 16
//#define mm_SUPER_BLOCKS_PER_SM 4
//int mm_SUPER_BLOCKS_PER_SM = 4;
#define iSizeMultiple 4 //must be multiples of 15
#define WA (4 * mm_BLOCK_SIZE) // Matrix A width
#define HA (4 * mm_BLOCK_SIZE) // Matrix A height
//#define WB (mm_SUPER_BLOCKS_PER_SM * mm_BLOCK_SIZE) // Matrix B width
#define WB (60 * mm_BLOCK_SIZE) // Matrix B width
#define HB WA // Matrix B height
#define WC WB // Matrix C width
#define HC HA // Matrix C height
#define mm_GRID_X (WC*iSizeMultiple/mm_BLOCK_SIZE)
#define mm_GRID_Y (HC*iSizeMultiple/mm_BLOCK_SIZE)
#define mm_NBLOCKS (mm_GRID_X*mm_GRID_Y)
#define AS(i, j) As[i][j]
#define BS(i, j) Bs[i][j]
void randomInit(float* data, int size)
{
for (int i = 0; i < size; ++i)
data[i] = rand() / (float)RAND_MAX;
}
void
computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB)
{
for (unsigned int i = 0; i < hA; ++i)
for (unsigned int j = 0; j < wB; ++j) {
double sum = 0;
for (unsigned int k = 0; k < wA; ++k) {
double a = A[i * wA + k];
double b = B[k * wB + j];
sum += a * b;
}
C[i * wB + j] = (float)sum;
}
}
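// mm_kernel: classic block-tiled matrix multiply skeleton. The shared-memory
// staging of the A/B tiles is commented out below, so each thread currently
// reads its operands directly from global memory while keeping the tiled loop
// structure intact.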
__global__ void
mm_kernel( float* C, float* A, float* B, int wA, int wB)
{
// Block index
int bx = blockIdx.x;
int by = blockIdx.y;
// Thread index
int tx = threadIdx.x;
int ty = threadIdx.y;
// Index of the first sub-matrix of A processed by the block
int aBegin = wA * mm_BLOCK_SIZE * by;
// Index of the last sub-matrix of A processed by the block
int aEnd = aBegin + wA - 1;
// Step size used to iterate through the sub-matrices of A
int aStep = mm_BLOCK_SIZE;
// Index of the first sub-matrix of B processed by the block
int bBegin = mm_BLOCK_SIZE * bx;
// Step size used to iterate through the sub-matrices of B
int bStep = mm_BLOCK_SIZE * wB;
// Csub is used to store the element of the block sub-matrix
// that is computed by the thread
float Csub = 0;
// Loop over all the sub-matrices of A and B
// required to compute the block sub-matrix
for (int a = aBegin, b = bBegin;
a <= aEnd;
a += aStep, b += bStep) {
// Declaration of the shared memory array As used to
// store the sub-matrix of A
// __shared__ float As[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
// Declaration of the shared memory array Bs used to
// store the sub-matrix of B
// __shared__ float Bs[mm_BLOCK_SIZE][mm_BLOCK_SIZE];
// Load the matrices from device memory
// to shared memory; each thread loads
// one element of each matrix
// AS(ty, tx) = A[a + wA * ty + tx];
// BS(ty, tx) = B[b + wB * ty + tx];
// Synchronize to make sure the matrices are loaded
// Multiply the two matrices together;
// each thread computes one element
// of the block sub-matrix
#pragma unroll
for (int k = 0; k < mm_BLOCK_SIZE; ++k)
Csub += A[a+wA * ty +k]*B[b + wB * k + tx];//AS(ty, k) * BS(k, tx);
// Synchronize to make sure that the preceding
// computation is done before loading two new
// sub-matrices of A and B in the next iteration
__syncthreads();
}
// Write the block sub-matrix to device memory;
// each thread writes one element
int c = wB * mm_BLOCK_SIZE * by + mm_BLOCK_SIZE * bx;
C[c + wB * ty + tx] = Csub;
//if (threadIdx.x==0&&threadIdx.y==0) atomicAdd(d_flag,1);
}
int main(int argc, char **argv) {
// cudaSetDevice(1);
srand(2013);
unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC;
uiWA = WA * iSizeMultiple;
uiHA = HA * iSizeMultiple;
uiWB = WB * iSizeMultiple;
uiHB = HB * iSizeMultiple;
uiWC = WC * iSizeMultiple;
uiHC = HC * iSizeMultiple;
// allocate host memory for matrices A and B
unsigned int size_A = uiWA * uiHA;
unsigned int mem_size_A = sizeof(float) * size_A;
float* h_A = (float*)malloc(mem_size_A);
unsigned int size_B = uiWB * uiHB;
unsigned int mem_size_B = sizeof(float) * size_B;
float* h_B = (float*)malloc(mem_size_B);
//printf("size A = %d bytes,size B=%d bytes\n",mem_size_A,mem_size_B);
// initialize host memory
randomInit(h_A, size_A);
randomInit(h_B, size_B);
// allocate device memory
float* d_A, *d_B, *d_C;
unsigned int size_C = uiWC * uiHC;
unsigned int mem_size_C = sizeof(float) * size_C;
printf("size A = %d bytes,size B=%d bytes,size C=%d bytes\n",mem_size_A,mem_size_B,mem_size_C);
// allocate host memory for the result
float* h_C = (float*) malloc(mem_size_C);
float* h_CUBLAS = (float*) malloc(mem_size_C);
checkCudaErrors(cudaMalloc((void**) &d_A, mem_size_A));
checkCudaErrors(cudaMalloc((void**) &d_B, mem_size_B));
// copy host memory to device
checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice) );
checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice) );
checkCudaErrors(cudaMalloc((void**) &d_C, mem_size_C));
cudaEvent_t kernel_start, kernel_stop;
cudaEventCreate(&kernel_start);
cudaEventCreate(&kernel_stop);
float kernel_time = 0.0f;
cudaEventRecord(kernel_start, 0);
// setup execution parameters
dim3 mm_grid(mm_GRID_X, mm_GRID_Y);
dim3 mm_block(mm_BLOCK_SIZE, mm_BLOCK_SIZE);
// int mm_grid=mm_GRID_X*mm_GRID_Y;
mm_kernel<<< mm_grid, mm_block>>>(d_C, d_A, d_B, uiWA, uiWB);
cudaDeviceSynchronize();
cudaEventRecord(kernel_stop, 0);
cudaEventSynchronize(kernel_stop);
// get elapsed time
kernel_time = 0.0f;
cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop);
kernel_time *= 1.e-3; // Convert to seconds
cout << "kernel exe time: " << kernel_time << endl;
// copy result from device to host
checkCudaErrors(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost) );
// compute reference solution
float* reference = (float*)malloc(mem_size_C);
computeGold(reference, h_A, h_B, uiHA, uiWA, uiWB);
// check result (matrixMul)
bool resCUDA = sdkCompareL2fe(reference, h_C, size_C, 1.0e-6f);
printf("CUDA matrixMul compares %s\n\n", (true == resCUDA) ? "passed" : "FAIL");
// ofstream f1("mm_correct.txt");
// for(int i=0; i<size_C; ++i)
// f1 << reference[i] << endl;
// f1.close();
//
// ofstream f2("mm_gpu.txt");
// for(int i=0; i<size_C; ++i)
// f2 << h_C[i] << endl;
// f2.close();
// clean up memory
free(h_A);
free(h_B);
free(h_C);
free(reference);
checkCudaErrors(cudaFree(d_A));
checkCudaErrors(cudaFree(d_B));
checkCudaErrors(cudaFree(d_C));
return 0;
}
|
84defe73888ac50704e64c95c6f99d0f1d5015ea.hip | // !!! This is a file automatically generated by hipify!!!
// #include "gpu_runtime.h"
// // the shape of bn_scale/bias 1*C*1*1
// int CuDNN_DLGpuRelu(const DLArrayHandle input, DLArrayHandle output, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
// int dev_id = (input_X->ctx).device_id;
// hipSetDevice(dev_id);
// cudnn_init(dev_id, stream_handle);
// int input_N = input->shape[0];
// int input_C = input->shape[1];
// int input_H = input->shape[2];
// int input_W = input->shape[3];
// if(p != NULL){
// int size_input = 1, size_output = 1;
// for(int i = 0; i < input -> ndim; i++)
// size_input *= input -> shape[i];
// for(int i = 0; i < output -> ndim; i++)
// size_output *= output -> shape[i];
// p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024;
// p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
// p -> workspace_memory = 0;
// cudnnTensorDescriptor_t input_desc;
// CUDNN_CALL(cudnnCreateTensorDescriptor(&input_desc));
// CUDNN_CALL(cudnnSetTensor4dDescriptor(input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W));
// cudnnTensorDescriptor_t output_desc;
// CUDNN_CALL(cudnnCreateTensorDescriptor(&output_desc));
// CUDNN_CALL(cudnnSetTensor4dDescriptor(output_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W));
// cudnnActivationDescriptor_t activation_desc;
// CUDNN_CALL(cudnnCreateActivationDescriptor(&activation_desc));
// CUDNN_CALL(cudnnSetActivationDescriptor(activation_desc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0));// after conv
// float *input_data = (float *)(input->data);
// float *output_data = (float *)(output->data);
// // Insert the begin and end event.
// hipEvent_t start, stop;
// hipEventCreate(&start);
// hipEventRecord(start,0);
// float alpha = 1.0f;
// float beta = 0.0f;
// CUDNN_CALL(cudnnActivationForward(cudnn_map[dev_id], activation_desc, &alpha,
// input_desc, input_data,
// &beta,
// output_desc, output_data));
// float elapsedTime;
// hipEventCreate(&stop);
// hipEventRecord(stop,0);
// hipEventSynchronize(stop);
// hipEventElapsedTime(&elapsedTime, start,stop);
// p->time = elapsedTime;
// CUDNN_CALL(cudnnDestroyTensorDescriptor(input_desc));
// CUDNN_CALL(cudnnDestroyTensorDescriptor(output_desc));
// CUDNN_CALL(cudnnDestroyActivationDescriptor(activation_desc));
// CUDNN_CALL(cudnnDestroy(cudnn_handle));
// }else{
// // input
// cudnnTensorDescriptor_t input_desc;
// CUDNN_CALL(cudnnCreateTensorDescriptor(&input_desc));
// CUDNN_CALL(cudnnSetTensor4dDescriptor(input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W));
// cudnnTensorDescriptor_t output_desc;
// CUDNN_CALL(cudnnCreateTensorDescriptor(&output_desc));
// CUDNN_CALL(cudnnSetTensor4dDescriptor(output_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W));
// cudnnActivationDescriptor_t activation_desc;
// CUDNN_CALL(cudnnCreateActivationDescriptor(&activation_desc));
// CUDNN_CALL(cudnnSetActivationDescriptor(activation_desc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0));// after conv
// float *input_data = (float *)(input->data);
// float *output_data = (float *)(output->data);
// float alpha = 1.0f;
// float beta = 0.0f;
// CUDNN_CALL(cudnnActivationForward(cudnn_handle, activation_desc, &alpha,
// input_desc, input_data,
// &beta,
// output_desc, output_data));
// CUDNN_CALL(cudnnDestroyTensorDescriptor(input_desc));
// CUDNN_CALL(cudnnDestroyTensorDescriptor(output_desc));
// CUDNN_CALL(cudnnDestroyActivationDescriptor(activation_desc));
// CUDNN_CALL(cudnnDestroy(cudnn_handle));
// }
// return 0;
// }
// int CuDNN_DLGpuReluGradient(const DLArrayHandle input, const DLArrayHandle in_grad,
// DLArrayHandle output, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL)
// int dev_id = (input_X->ctx).device_id;
// hipSetDevice(dev_id);
// cudnn_init(dev_id, stream_handle);
// int input_N = input->shape[0];
// int input_C = input->shape[1];
// int input_H = input->shape[2];
// int input_W = input->shape[3];
// if(p != NULL){
// int size_input = 1, size_output = 1;
// for(int i = 0; i < input -> ndim; i++)
// size_input *= input -> shape[i];
// for(int i = 0; i < output -> ndim; i++)
// size_output *= output -> shape[i];
// p -> input_memory = 2.0 * (size_input) * sizeof(float) / 1024 / 1024;
// p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
// p -> workspace_memory = 0;
// cudnnTensorDescriptor_t input_desc;
// CUDNN_CALL(cudnnCreateTensorDescriptor(&input_desc));
// CUDNN_CALL(cudnnSetTensor4dDescriptor(input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W));
// cudnnTensorDescriptor_t output_desc;
// CUDNN_CALL(cudnnCreateTensorDescriptor(&output_desc));
// CUDNN_CALL(cudnnSetTensor4dDescriptor(output_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W));
// cudnnActivationDescriptor_t activation_desc;
// CUDNN_CALL(cudnnCreateActivationDescriptor(&activation_desc));
// CUDNN_CALL(cudnnSetActivationDescriptor(activation_desc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0));// after conv
// float *input_data = (float *)(input->data);
// float *output_data = (float *)(output->data);
// // Insert the begin and end event.
// hipEvent_t start, stop;
// hipEventCreate(&start);
// hipEventRecord(start,0);
// float alpha = 1.0f;
// float beta = 0.0f;
// CUDNN_CALL(cudnnActivationForward(cudnn_map[dev_id], activation_desc, &alpha,
// input_desc, input_data,
// &beta,
// output_desc, output_data));
// float elapsedTime;
// hipEventCreate(&stop);
// hipEventRecord(stop,0);
// hipEventSynchronize(stop);
// hipEventElapsedTime(&elapsedTime, start,stop);
// p->time = elapsedTime;
// CUDNN_CALL(cudnnDestroyTensorDescriptor(input_desc));
// CUDNN_CALL(cudnnDestroyTensorDescriptor(output_desc));
// CUDNN_CALL(cudnnDestroyActivationDescriptor(activation_desc));
// CUDNN_CALL(cudnnDestroy(cudnn_handle));
// }
// return 0;
// } | 84defe73888ac50704e64c95c6f99d0f1d5015ea.cu | // #include "gpu_runtime.h"
// // the shape of bn_scale/bias 1*C*1*1
// int CuDNN_DLGpuRelu(const DLArrayHandle input, DLArrayHandle output, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL){
// int dev_id = (input_X->ctx).device_id;
// cudaSetDevice(dev_id);
// cudnn_init(dev_id, stream_handle);
// int input_N = input->shape[0];
// int input_C = input->shape[1];
// int input_H = input->shape[2];
// int input_W = input->shape[3];
// if(p != NULL){
// int size_input = 1, size_output = 1;
// for(int i = 0; i < input -> ndim; i++)
// size_input *= input -> shape[i];
// for(int i = 0; i < output -> ndim; i++)
// size_output *= output -> shape[i];
// p -> input_memory = 1.0 * (size_input) * sizeof(float) / 1024 / 1024;
// p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
// p -> workspace_memory = 0;
// cudnnTensorDescriptor_t input_desc;
// CUDNN_CALL(cudnnCreateTensorDescriptor(&input_desc));
// CUDNN_CALL(cudnnSetTensor4dDescriptor(input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W));
// cudnnTensorDescriptor_t output_desc;
// CUDNN_CALL(cudnnCreateTensorDescriptor(&output_desc));
// CUDNN_CALL(cudnnSetTensor4dDescriptor(output_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W));
// cudnnActivationDescriptor_t activation_desc;
// CUDNN_CALL(cudnnCreateActivationDescriptor(&activation_desc));
// CUDNN_CALL(cudnnSetActivationDescriptor(activation_desc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0));// after conv
// float *input_data = (float *)(input->data);
// float *output_data = (float *)(output->data);
// // Insert the begin and end event.
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventRecord(start,0);
// float alpha = 1.0f;
// float beta = 0.0f;
// CUDNN_CALL(cudnnActivationForward(cudnn_map[dev_id], activation_desc, &alpha,
// input_desc, input_data,
// &beta,
// output_desc, output_data));
// float elapsedTime;
// cudaEventCreate(&stop);
// cudaEventRecord(stop,0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&elapsedTime, start,stop);
// p->time = elapsedTime;
// CUDNN_CALL(cudnnDestroyTensorDescriptor(input_desc));
// CUDNN_CALL(cudnnDestroyTensorDescriptor(output_desc));
// CUDNN_CALL(cudnnDestroyActivationDescriptor(activation_desc));
// CUDNN_CALL(cudnnDestroy(cudnn_handle));
// }else{
// // input
// cudnnTensorDescriptor_t input_desc;
// CUDNN_CALL(cudnnCreateTensorDescriptor(&input_desc));
// CUDNN_CALL(cudnnSetTensor4dDescriptor(input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W));
// cudnnTensorDescriptor_t output_desc;
// CUDNN_CALL(cudnnCreateTensorDescriptor(&output_desc));
// CUDNN_CALL(cudnnSetTensor4dDescriptor(output_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W));
// cudnnActivationDescriptor_t activation_desc;
// CUDNN_CALL(cudnnCreateActivationDescriptor(&activation_desc));
// CUDNN_CALL(cudnnSetActivationDescriptor(activation_desc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0));// after conv
// float *input_data = (float *)(input->data);
// float *output_data = (float *)(output->data);
// float alpha = 1.0f;
// float beta = 0.0f;
// CUDNN_CALL(cudnnActivationForward(cudnn_handle, activation_desc, &alpha,
// input_desc, input_data,
// &beta,
// output_desc, output_data));
// CUDNN_CALL(cudnnDestroyTensorDescriptor(input_desc));
// CUDNN_CALL(cudnnDestroyTensorDescriptor(output_desc));
// CUDNN_CALL(cudnnDestroyActivationDescriptor(activation_desc));
// CUDNN_CALL(cudnnDestroy(cudnn_handle));
// }
// return 0;
// }
// int CuDNN_DLGpuReluGradient(const DLArrayHandle input, const DLArrayHandle in_grad,
// DLArrayHandle output, DLStreamHandle stream_handle = NULL, ProfilerHandle p = NULL)
// int dev_id = (input_X->ctx).device_id;
// cudaSetDevice(dev_id);
// cudnn_init(dev_id, stream_handle);
// int input_N = input->shape[0];
// int input_C = input->shape[1];
// int input_H = input->shape[2];
// int input_W = input->shape[3];
// if(p != NULL){
// int size_input = 1, size_output = 1;
// for(int i = 0; i < input -> ndim; i++)
// size_input *= input -> shape[i];
// for(int i = 0; i < output -> ndim; i++)
// size_output *= output -> shape[i];
// p -> input_memory = 2.0 * (size_input) * sizeof(float) / 1024 / 1024;
// p -> output_memory = 1.0 * size_output * sizeof(float) / 1024 / 1024;
// p -> workspace_memory = 0;
// cudnnTensorDescriptor_t input_desc;
// CUDNN_CALL(cudnnCreateTensorDescriptor(&input_desc));
// CUDNN_CALL(cudnnSetTensor4dDescriptor(input_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W));
// cudnnTensorDescriptor_t output_desc;
// CUDNN_CALL(cudnnCreateTensorDescriptor(&output_desc));
// CUDNN_CALL(cudnnSetTensor4dDescriptor(output_desc, CUDNN_TENSOR_NCHW, CUDNN_DATA_FLOAT, input_N, input_C, input_H, input_W));
// cudnnActivationDescriptor_t activation_desc;
// CUDNN_CALL(cudnnCreateActivationDescriptor(&activation_desc));
// CUDNN_CALL(cudnnSetActivationDescriptor(activation_desc, CUDNN_ACTIVATION_RELU, CUDNN_PROPAGATE_NAN, 0));// after conv
// float *input_data = (float *)(input->data);
// float *output_data = (float *)(output->data);
// // Insert the begin and end event.
// cudaEvent_t start, stop;
// cudaEventCreate(&start);
// cudaEventRecord(start,0);
// float alpha = 1.0f;
// float beta = 0.0f;
// CUDNN_CALL(cudnnActivationForward(cudnn_map[dev_id], activation_desc, &alpha,
// input_desc, input_data,
// &beta,
// output_desc, output_data));
// float elapsedTime;
// cudaEventCreate(&stop);
// cudaEventRecord(stop,0);
// cudaEventSynchronize(stop);
// cudaEventElapsedTime(&elapsedTime, start,stop);
// p->time = elapsedTime;
// CUDNN_CALL(cudnnDestroyTensorDescriptor(input_desc));
// CUDNN_CALL(cudnnDestroyTensorDescriptor(output_desc));
// CUDNN_CALL(cudnnDestroyActivationDescriptor(activation_desc));
// CUDNN_CALL(cudnnDestroy(cudnn_handle));
// }
// return 0;
// } |
ecf6fec9466e68077036a90a462bf4bfa732681f.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <hiprand/hiprand_kernel.h>
#include <stdlib.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "RoyFloyd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
hipSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *matrix = NULL;
hipMalloc(&matrix, XSIZE*YSIZE);
int k = 1;
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
hipFree(0);
hipLaunchKernelGGL((RoyFloyd), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,k,N);
hipDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
hipLaunchKernelGGL((RoyFloyd), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,k,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
hipLaunchKernelGGL((RoyFloyd), dim3(gridBlock),dim3(threadBlock), 0, 0, matrix,k,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} | ecf6fec9466e68077036a90a462bf4bfa732681f.cu | #include <stdbool.h>
#include <stdio.h>
#include <string.h>
#include <getopt.h>
#include <curand_kernel.h>
#include <stdlib.h>
#include <cuda.h>
#include <sys/time.h>
#include "RoyFloyd.cu"
#include<chrono>
#include<iostream>
using namespace std;
using namespace std::chrono;
int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}};
int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}};
int main(int argc, char **argv) {
cudaSetDevice(0);
char* p;int matrix_len=strtol(argv[1], &p, 10);
for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){
for(int block_looper=0;block_looper<20;block_looper++){
int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1];
int *matrix = NULL;
cudaMalloc(&matrix, XSIZE*YSIZE*sizeof(int)); // allocate XSIZE*YSIZE ints (size argument is in bytes)
int k = 1;
int N = XSIZE*YSIZE;
int iXSIZE= XSIZE;
int iYSIZE= YSIZE;
while(iXSIZE%BLOCKX!=0)
{
iXSIZE++;
}
while(iYSIZE%BLOCKY!=0)
{
iYSIZE++;
}
dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY);
dim3 threadBlock(BLOCKX, BLOCKY);
cudaFree(0);
RoyFloyd<<<gridBlock,threadBlock>>>(matrix,k,N);
cudaDeviceSynchronize();
for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {
RoyFloyd<<<gridBlock,threadBlock>>>(matrix,k,N);
}
auto start = steady_clock::now();
for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {
RoyFloyd<<<gridBlock,threadBlock>>>(matrix,k,N);
}
auto end = steady_clock::now();
auto usecs = duration_cast<duration<float, microseconds::period> >(end - start);
cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl;
}
}} |
957b4c76270254d61058f7770416b671439d24f5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
////////////////////////////////////////////////////////////////////////////
// File: ProgramCU.cu
// Author: Changchang Wu
// Description : implementation of ProgramCU and all CUDA kernels
//
// Copyright (c) 2007 University of North Carolina at Chapel Hill
// All Rights Reserved
//
// Permission to use, copy, modify and distribute this software and its
// documentation for educational, research and non-profit purposes, without
// fee, and without a written agreement is hereby granted, provided that the
// above copyright notice and the following paragraph appear in all copies.
//
// The University of North Carolina at Chapel Hill make no representations
// about the suitability of this software for any purpose. It is provided
// 'as is' without express or implied warranty.
//
// Please send BUG REPORTS to [email protected]
//
////////////////////////////////////////////////////////////////////////////
#if defined(CUDA_SIFTGPU_ENABLED)
#include "GL/glew.h"
#include <iostream>
#include <algorithm>
using namespace std;
#include "CuTexImage.h"
#include "ProgramCU.h"
#include "GlobalUtil.h"
//Standard block size
#define BLOCK_DIM 16
#define BLOCK_LOG_DIM 4
#define IMUL(X,Y) __mul24(X,Y)
//#define FDIV(X,Y) ((X)/(Y))
#define FDIV(X,Y) __fdividef(X,Y)
//filter kernel
#define KERNEL_MAX_WIDTH 33
//#define MAX_THREAD_PER_BLOCK 512 = 16 * 32
//////////////////////////////larger block gives better performance
#define FILTERV_TILE_WIDTH 16
#define FILTERV_TILE_HEIGHT 128
#define FILTERV_TBLK_HEIGHT 32
////////////////////////////
#define FILTERH_TILE_WIDTH 128
__device__ __constant__ float d_kernel[KERNEL_MAX_WIDTH];
texture<float, 1, hipReadModeElementType> texData;
texture<float2, 2, hipReadModeElementType> texDataF2;
texture<float4, 1, hipReadModeElementType> texDataF4;
texture<int4, 1, hipReadModeElementType> texDataI4;
texture<int4, 1, hipReadModeElementType> texDataList;
//template<int i> __device__ float Conv(float *data) { return Conv<i-1>(data) + data[i]*d_kernel[i];}
//template<> __device__ float Conv<0>(float *data) { return data[0] * d_kernel[0]; }
//////////////////////////////////////////////////////////////
template<int FW> __global__ void FilterH( float* d_result, int width)
{
const int HALF_WIDTH = FW >> 1;
const int CACHE_WIDTH = FILTERH_TILE_WIDTH + FW -1;
const int CACHE_COUNT = 2 + (CACHE_WIDTH - 2)/ FILTERH_TILE_WIDTH;
__shared__ float data[CACHE_WIDTH];
const int bcol = IMUL(blockIdx.x, FILTERH_TILE_WIDTH);
const int col = bcol + threadIdx.x;
const int index_min = IMUL(blockIdx.y, width);
const int index_max = index_min + width - 1;
int src_index = index_min + bcol - HALF_WIDTH + threadIdx.x;
int cache_index = threadIdx.x;
float value = 0;
#pragma unroll
for(int j = 0; j < CACHE_COUNT; ++j)
{
if(cache_index < CACHE_WIDTH)
{
int fetch_index = src_index < index_min? index_min : (src_index > index_max ? index_max : src_index);
data[cache_index] = tex1Dfetch(texData,fetch_index);
src_index += FILTERH_TILE_WIDTH;
cache_index += FILTERH_TILE_WIDTH;
}
}
__syncthreads();
if(col >= width) return;
#pragma unroll
for(int i = 0; i < FW; ++i)
{
value += (data[threadIdx.x + i]* d_kernel[i]);
}
// value = Conv<FW-1>(data + threadIdx.x);
d_result[index_min + col] = value;
}
////////////////////////////////////////////////////////////////////
template<int FW> __global__ void FilterV(float* d_result, int width, int height)
{
const int HALF_WIDTH = FW >> 1;
const int CACHE_WIDTH = FW + FILTERV_TILE_HEIGHT - 1;
const int TEMP = CACHE_WIDTH & 0xf;
//add some extra space to avoid bank conflict
#if FILTERV_TILE_WIDTH == 16
//make the stride 16 * n +/- 1
const int EXTRA = (TEMP == 1 || TEMP == 0) ? 1 - TEMP : 15 - TEMP;
#elif FILTERV_TILE_WIDTH == 8
//make the stride 16 * n +/- 2
const int EXTRA = (TEMP == 2 || TEMP == 1 || TEMP == 0) ? 2 - TEMP : (TEMP == 15? 3 : 14 - TEMP);
#elif FILTERV_TILE_WIDTH == 4
//make the stride 16 * n +/- 4
const int EXTRA = (TEMP >=0 && TEMP <=4) ? 4 - TEMP : (TEMP > 12? 20 - TEMP : 12 - TEMP);
#else
#error
#endif
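// CACHE_TRUE_WIDTH is the per-thread row stride in shared memory; padding it to 16*n +/- 1
// (or +/- 2, +/- 4 for the narrower tiles) keeps simultaneous accesses from different threads
// in different shared-memory banks.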
const int CACHE_TRUE_WIDTH = CACHE_WIDTH + EXTRA;
const int CACHE_COUNT = (CACHE_WIDTH + FILTERV_TBLK_HEIGHT - 1) / FILTERV_TBLK_HEIGHT;
const int WRITE_COUNT = (FILTERV_TILE_HEIGHT + FILTERV_TBLK_HEIGHT -1) / FILTERV_TBLK_HEIGHT;
__shared__ float data[CACHE_TRUE_WIDTH * FILTERV_TILE_WIDTH];
const int row_block_first = IMUL(blockIdx.y, FILTERV_TILE_HEIGHT);
const int col = IMUL(blockIdx.x, FILTERV_TILE_WIDTH) + threadIdx.x;
const int row_first = row_block_first - HALF_WIDTH;
const int data_index_max = IMUL(height - 1, width) + col;
const int cache_col_start = threadIdx.y;
const int cache_row_start = IMUL(threadIdx.x, CACHE_TRUE_WIDTH);
int cache_index = cache_col_start + cache_row_start;
int data_index = IMUL(row_first + cache_col_start, width) + col;
if(col < width)
{
#pragma unroll
for(int i = 0; i < CACHE_COUNT; ++i)
{
if(cache_col_start < CACHE_WIDTH - i * FILTERV_TBLK_HEIGHT)
{
int fetch_index = data_index < col ? col : (data_index > data_index_max? data_index_max : data_index);
data[cache_index + i * FILTERV_TBLK_HEIGHT] = tex1Dfetch(texData,fetch_index);
data_index += IMUL(FILTERV_TBLK_HEIGHT, width);
}
}
}
__syncthreads();
if(col >= width) return;
int row = row_block_first + threadIdx.y;
int index_start = cache_row_start + threadIdx.y;
#pragma unroll
for(int i = 0; i < WRITE_COUNT; ++i,
row += FILTERV_TBLK_HEIGHT, index_start += FILTERV_TBLK_HEIGHT)
{
if(row < height)
{
int index_dest = IMUL(row, width) + col;
float value = 0;
#pragma unroll
for(int i = 0; i < FW; ++i)
{
value += (data[index_start + i] * d_kernel[i]);
}
d_result[index_dest] = value;
}
}
}
template<int LOG_SCALE> __global__ void UpsampleKernel(float* d_result, int width)
{
const int SCALE = (1 << LOG_SCALE), SCALE_MASK = (SCALE - 1);
const float INV_SCALE = 1.0f / (float(SCALE));
int col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(col >= width) return;
int row = blockIdx.y >> LOG_SCALE;
int index = row * width + col;
int dst_row = blockIdx.y;
int dst_idx= (width * dst_row + col) * SCALE;
int helper = blockIdx.y & SCALE_MASK;
if (helper)
{
float v11 = tex1Dfetch(texData, index);
float v12 = tex1Dfetch(texData, index + 1);
index += width;
float v21 = tex1Dfetch(texData, index);
float v22 = tex1Dfetch(texData, index + 1);
float w1 = INV_SCALE * helper, w2 = 1.0 - w1;
float v1 = (v21 * w1 + w2 * v11);
float v2 = (v22 * w1 + w2 * v12);
d_result[dst_idx] = v1;
#pragma unroll
for(int i = 1; i < SCALE; ++i)
{
const float r2 = i * INV_SCALE;
const float r1 = 1.0f - r2;
d_result[dst_idx +i] = v1 * r1 + v2 * r2;
}
}else
{
float v1 = tex1Dfetch(texData, index);
float v2 = tex1Dfetch(texData, index + 1);
d_result[dst_idx] = v1;
#pragma unroll
for(int i = 1; i < SCALE; ++i)
{
const float r2 = i * INV_SCALE;
const float r1 = 1.0f - r2;
d_result[dst_idx +i] = v1 * r1 + v2 * r2;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
void ProgramCU::SampleImageU(CuTexImage *dst, CuTexImage *src, int log_scale)
{
int width = src->GetImgWidth(), height = src->GetImgHeight();
src->BindTexture(texData);
dim3 grid((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height << log_scale);
dim3 block(FILTERH_TILE_WIDTH);
switch(log_scale)
{
case 1 : hipLaunchKernelGGL(( UpsampleKernel<1>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, width); break;
case 2 : hipLaunchKernelGGL(( UpsampleKernel<2>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, width); break;
case 3 : hipLaunchKernelGGL(( UpsampleKernel<3>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, width); break;
default: break;
}
}
template<int LOG_SCALE> __global__ void DownsampleKernel(float* d_result, int src_width, int dst_width)
{
const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(dst_col >= dst_width) return;
const int src_col = min((dst_col << LOG_SCALE), (src_width - 1));
const int dst_row = blockIdx.y;
const int src_row = blockIdx.y << LOG_SCALE;
const int src_idx = IMUL(src_row, src_width) + src_col;
const int dst_idx = IMUL(dst_width, dst_row) + dst_col;
d_result[dst_idx] = tex1Dfetch(texData, src_idx);
}
__global__ void DownsampleKernel(float* d_result, int src_width, int dst_width, const int log_scale)
{
const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(dst_col >= dst_width) return;
const int src_col = min((dst_col << log_scale), (src_width - 1));
const int dst_row = blockIdx.y;
const int src_row = blockIdx.y << log_scale;
const int src_idx = IMUL(src_row, src_width) + src_col;
const int dst_idx = IMUL(dst_width, dst_row) + dst_col;
d_result[dst_idx] = tex1Dfetch(texData, src_idx);
}
void ProgramCU::SampleImageD(CuTexImage *dst, CuTexImage *src, int log_scale)
{
int src_width = src->GetImgWidth(), dst_width = dst->GetImgWidth() ;
src->BindTexture(texData);
dim3 grid((dst_width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, dst->GetImgHeight());
dim3 block(FILTERH_TILE_WIDTH);
switch(log_scale)
{
case 1 : hipLaunchKernelGGL(( DownsampleKernel<1>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width); break;
case 2 :hipLaunchKernelGGL(( DownsampleKernel<2>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width); break;
case 3 : hipLaunchKernelGGL(( DownsampleKernel<3>) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width); break;
default:hipLaunchKernelGGL(( DownsampleKernel) , dim3(grid), dim3(block), 0, 0, (float*) dst->_cuData, src_width, dst_width, log_scale);
}
}
__global__ void ChannelReduce_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
d_result[index] = tex1Dfetch(texData, index*4);
}
__global__ void ChannelReduce_Convert_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
float4 rgba = tex1Dfetch(texDataF4, index);
d_result[index] = 0.299f * rgba.x + 0.587f* rgba.y + 0.114f * rgba.z;
}
void ProgramCU::ReduceToSingleChannel(CuTexImage* dst, CuTexImage* src, int convert_rgb)
{
int width = src->GetImgWidth(), height = dst->GetImgHeight() ;
dim3 grid((width * height + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH);
dim3 block(FILTERH_TILE_WIDTH);
if(convert_rgb)
{
src->BindTexture(texDataF4);
hipLaunchKernelGGL(( ChannelReduce_Convert_Kernel), dim3(grid), dim3(block), 0, 0, (float*)dst->_cuData);
}else
{
src->BindTexture(texData);
hipLaunchKernelGGL(( ChannelReduce_Kernel), dim3(grid), dim3(block), 0, 0, (float*)dst->_cuData);
}
}
void ProgramCU::CreateFilterKernel(float sigma, float* kernel, int& width)
{
int i, sz = int( ceil( GlobalUtil::_FilterWidthFactor * sigma -0.5) ) ;//
width = 2*sz + 1;
//filter size truncation
if(width > KERNEL_MAX_WIDTH)
{
//std::cout<<"Filter truncated "<<width<<"->"<<KERNEL_MAX_WIDTH<<endl;
sz = KERNEL_MAX_WIDTH >> 1;
width =KERNEL_MAX_WIDTH;
}
float rv = 1.0f/(sigma*sigma), v, ksum =0;
// pre-compute filter
for( i = -sz ; i <= sz ; ++i)
{
kernel[i+sz] = v = exp(-0.5f * i * i *rv) ;
ksum += v;
}
//normalize the kernel
rv = 1.0f/ksum;
for(i = 0; i< width ;i++) kernel[i]*=rv;
}
template<int FW> void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf)
{
int width = src->GetImgWidth(), height = src->GetImgHeight();
//horizontal filtering
src->BindTexture(texData);
dim3 gridh((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height);
dim3 blockh(FILTERH_TILE_WIDTH);
hipLaunchKernelGGL(( FilterH<FW>), dim3(gridh), dim3(blockh), 0, 0, (float*)buf->_cuData, width);
CheckErrorCUDA("FilterH");
///vertical filtering
buf->BindTexture(texData);
dim3 gridv((width + FILTERV_TILE_WIDTH - 1)/ FILTERV_TILE_WIDTH, (height + FILTERV_TILE_HEIGHT - 1)/FILTERV_TILE_HEIGHT);
dim3 blockv(FILTERV_TILE_WIDTH, FILTERV_TBLK_HEIGHT);
hipLaunchKernelGGL(( FilterV<FW>), dim3(gridv), dim3(blockv), 0, 0, (float*)dst->_cuData, width, height);
CheckErrorCUDA("FilterV");
}
//////////////////////////////////////////////////////////////////////
// tested on 2048x1500 image, the time on pyramid construction is
// -pack cg version : 18ms
// -unpack cg version : 49 ms
// -cuda version: 28 ms
void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf, float sigma)
{
float filter_kernel[KERNEL_MAX_WIDTH]; int width;
CreateFilterKernel(sigma, filter_kernel, width);
hipMemcpyToSymbol(d_kernel, filter_kernel, width * sizeof(float), 0, hipMemcpyHostToDevice);
switch(width)
{
case 5: FilterImage< 5>(dst, src, buf); break;
case 7: FilterImage< 7>(dst, src, buf); break;
case 9: FilterImage< 9>(dst, src, buf); break;
case 11: FilterImage<11>(dst, src, buf); break;
case 13: FilterImage<13>(dst, src, buf); break;
case 15: FilterImage<15>(dst, src, buf); break;
case 17: FilterImage<17>(dst, src, buf); break;
case 19: FilterImage<19>(dst, src, buf); break;
case 21: FilterImage<21>(dst, src, buf); break;
case 23: FilterImage<23>(dst, src, buf); break;
case 25: FilterImage<25>(dst, src, buf); break;
case 27: FilterImage<27>(dst, src, buf); break;
case 29: FilterImage<29>(dst, src, buf); break;
case 31: FilterImage<31>(dst, src, buf); break;
case 33: FilterImage<33>(dst, src, buf); break;
default: break;
}
}
#define DOG_BLOCK_DIMX 128
#define DOG_BLOCK_DIMY 1
#define DOG_BLOCK_LOG_DIMX 7
#define DOG_BLOCK_LOG_DIMY 0
texture<float, 1, hipReadModeElementType> texC;
texture<float, 1, hipReadModeElementType> texP;
texture<float, 1, hipReadModeElementType> texN;
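// ComputeDOG_Kernel: writes the difference of Gaussians (current level minus previous level);
// the variant with d_got also stores the gradient magnitude and orientation of the current level.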
void __global__ ComputeDOG_Kernel(float* d_dog, float2* d_got, int width, int height)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if(col < width && row < height)
{
int index = IMUL(row, width) + col;
float vp = tex1Dfetch(texP, index);
float v = tex1Dfetch(texC, index);
d_dog[index] = v - vp;
float vxn = tex1Dfetch(texC, index + 1);
float vxp = tex1Dfetch(texC, index - 1);
float vyp = tex1Dfetch(texC, index - width);
float vyn = tex1Dfetch(texC, index + width);
float dx = vxn - vxp, dy = vyn - vyp;
float grd = 0.5f * sqrt(dx * dx + dy * dy);
float rot = (grd == 0.0f? 0.0f : atan2(dy, dx));
d_got[index] = make_float2(grd, rot);
}
}
void __global__ ComputeDOG_Kernel(float* d_dog, int width, int height)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if(col < width && row < height)
{
int index = IMUL(row, width) + col;
float vp = tex1Dfetch(texP, index);
float v = tex1Dfetch(texC, index);
d_dog[index] = v - vp;
}
}
void ProgramCU::ComputeDOG(CuTexImage* gus, CuTexImage* dog, CuTexImage* got)
{
int width = gus->GetImgWidth(), height = gus->GetImgHeight();
dim3 grid((width + DOG_BLOCK_DIMX - 1)/ DOG_BLOCK_DIMX, (height + DOG_BLOCK_DIMY - 1)/DOG_BLOCK_DIMY);
dim3 block(DOG_BLOCK_DIMX, DOG_BLOCK_DIMY);
gus->BindTexture(texC);
(gus -1)->BindTexture(texP);
if(got->_cuData)
hipLaunchKernelGGL(( ComputeDOG_Kernel), dim3(grid), dim3(block), 0, 0, (float*) dog->_cuData, (float2*) got->_cuData, width, height);
else
hipLaunchKernelGGL(( ComputeDOG_Kernel), dim3(grid), dim3(block), 0, 0, (float*) dog->_cuData, width, height);
}
#define KEY_BLOCK_LOG_DIMX 3
#define KEY_BLOCK_LOG_DIMY 3
#define KEY_BLOCK_DIMX (1<<KEY_BLOCK_LOG_DIMX)
#define KEY_BLOCK_DIMY (1<<KEY_BLOCK_LOG_DIMY)
//4/5, 3/2 -> 33
//4, 1 -> 45
//4, 0 -> 60
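// READ_CMP_DOG_DATA: loads three horizontal neighbours of the given row into datai and updates the
// running max/min; as soon as the centre value v is no longer a strict extremum it jumps to key_finish.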
#define READ_CMP_DOG_DATA(datai, tex, idx) \
datai[0] = tex1Dfetch(tex, idx - 1);\
datai[1] = tex1Dfetch(tex, idx);\
datai[2] = tex1Dfetch(tex, idx + 1);\
if(v > nmax)\
{\
nmax = max(nmax, datai[0]);\
nmax = max(nmax, datai[1]);\
nmax = max(nmax, datai[2]);\
if(v < nmax) goto key_finish;\
}else\
{\
nmin = min(nmin, datai[0]);\
nmin = min(nmin, datai[1]);\
nmin = min(nmin, datai[2]);\
if(v > nmin) goto key_finish;\
}
void __global__ ComputeKEY_Kernel(float4* d_key, int width, int colmax, int rowmax,
float dog_threshold0, float dog_threshold, float edge_threshold, int subpixel_localization)
{
float data[3][3], v;
float datap[3][3], datan[3][3];
int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y + 1;
int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x + 1;
int index = IMUL(row, width) + col;
int idx[3] ={index - width, index, index + width};
int in_image =0;
float nmax, nmin, result = 0.0f;
float dx = 0, dy = 0, ds = 0;
bool offset_test_passed = true;
if(row < rowmax && col < colmax)
{
in_image = 1;
data[1][1] = v = tex1Dfetch(texC, idx[1]);
if(fabs(v) <= dog_threshold0) goto key_finish;
data[1][0] = tex1Dfetch(texC, idx[1] - 1);
data[1][2] = tex1Dfetch(texC, idx[1] + 1);
nmax = max(data[1][0], data[1][2]);
nmin = min(data[1][0], data[1][2]);
if(v <=nmax && v >= nmin) goto key_finish;
//if((v > nmax && v < 0 )|| (v < nmin && v > 0)) goto key_finish;
READ_CMP_DOG_DATA(data[0], texC, idx[0]);
READ_CMP_DOG_DATA(data[2], texC, idx[2]);
//edge suppression
float vx2 = v * 2.0f;
float fxx = data[1][0] + data[1][2] - vx2;
float fyy = data[0][1] + data[2][1] - vx2;
float fxy = 0.25f * (data[2][2] + data[0][0] - data[2][0] - data[0][2]);
float temp1 = fxx * fyy - fxy * fxy;
float temp2 = (fxx + fyy) * (fxx + fyy);
if(temp1 <=0 || temp2 > edge_threshold * temp1) goto key_finish;
//read the previous level
READ_CMP_DOG_DATA(datap[0], texP, idx[0]);
READ_CMP_DOG_DATA(datap[1], texP, idx[1]);
READ_CMP_DOG_DATA(datap[2], texP, idx[2]);
//read the next level
READ_CMP_DOG_DATA(datan[0], texN, idx[0]);
READ_CMP_DOG_DATA(datan[1], texN, idx[1]);
READ_CMP_DOG_DATA(datan[2], texN, idx[2]);
if(subpixel_localization)
{
//subpixel localization
float fx = 0.5f * (data[1][2] - data[1][0]);
float fy = 0.5f * (data[2][1] - data[0][1]);
float fs = 0.5f * (datan[1][1] - datap[1][1]);
float fss = (datan[1][1] + datap[1][1] - vx2);
float fxs = 0.25f* (datan[1][2] + datap[1][0] - datan[1][0] - datap[1][2]);
float fys = 0.25f* (datan[2][1] + datap[0][1] - datan[0][1] - datap[2][1]);
//need to solve dx, dy, ds;
// |-fx| | fxx fxy fxs | |dx|
// |-fy| = | fxy fyy fys | * |dy|
// |-fs| | fxs fys fss | |ds|
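// Solved below by Gaussian elimination with partial pivoting on the augmented rows A0..A2,
// then back-substitution for ds, dy, dx; the offsets are rejected if any exceeds one pixel.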
float4 A0 = fxx > 0? make_float4(fxx, fxy, fxs, -fx) : make_float4(-fxx, -fxy, -fxs, fx);
float4 A1 = fxy > 0? make_float4(fxy, fyy, fys, -fy) : make_float4(-fxy, -fyy, -fys, fy);
float4 A2 = fxs > 0? make_float4(fxs, fys, fss, -fs) : make_float4(-fxs, -fys, -fss, fs);
float maxa = max(max(A0.x, A1.x), A2.x);
if(maxa >= 1e-10)
{
if(maxa == A1.x)
{
float4 TEMP = A1; A1 = A0; A0 = TEMP;
}else if(maxa == A2.x)
{
float4 TEMP = A2; A2 = A0; A0 = TEMP;
}
A0.y /= A0.x; A0.z /= A0.x; A0.w/= A0.x;
A1.y -= A1.x * A0.y; A1.z -= A1.x * A0.z; A1.w -= A1.x * A0.w;
A2.y -= A2.x * A0.y; A2.z -= A2.x * A0.z; A2.w -= A2.x * A0.w;
if(abs(A2.y) > abs(A1.y))
{
float4 TEMP = A2; A2 = A1; A1 = TEMP;
}
if(abs(A1.y) >= 1e-10)
{
A1.z /= A1.y; A1.w /= A1.y;
A2.z -= A2.y * A1.z; A2.w -= A2.y * A1.w;
if(abs(A2.z) >= 1e-10)
{
ds = A2.w / A2.z;
dy = A1.w - ds * A1.z;
dx = A0.w - ds * A0.z - dy * A0.y;
offset_test_passed =
fabs(data[1][1] + 0.5f * (dx * fx + dy * fy + ds * fs)) > dog_threshold
&&fabs(ds) < 1.0f && fabs(dx) < 1.0f && fabs(dy) < 1.0f;
}
}
}
}
if(offset_test_passed) result = v > nmax ? 1.0 : -1.0;
}
key_finish:
if(in_image) d_key[index] = make_float4(result, dx, dy, ds);
}
void ProgramCU::ComputeKEY(CuTexImage* dog, CuTexImage* key, float Tdog, float Tedge)
{
int width = dog->GetImgWidth(), height = dog->GetImgHeight();
float Tdog1 = (GlobalUtil::_SubpixelLocalization? 0.8f : 1.0f) * Tdog;
CuTexImage* dogp = dog - 1;
CuTexImage* dogn = dog + 1;
dim3 grid((width - 1 + KEY_BLOCK_DIMX - 1)/ KEY_BLOCK_DIMX, (height - 1 + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY);
dim3 block(KEY_BLOCK_DIMX, KEY_BLOCK_DIMY);
dogp->BindTexture(texP);
dog ->BindTexture(texC);
dogn->BindTexture(texN);
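// Convert the edge-ratio threshold r into (r+1)^2/r, the bound applied to trace^2/det of the
// DoG Hessian inside the kernel's edge test.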
Tedge = (Tedge+1)*(Tedge+1)/Tedge;
hipLaunchKernelGGL(( ComputeKEY_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) key->_cuData, width, width -1, height -1,
Tdog1, Tdog, Tedge, GlobalUtil::_SubpixelLocalization);
}
#define HIST_INIT_WIDTH 128
void __global__ InitHist_Kernel(int4* hist, int ws, int wd, int height)
{
int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;
int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(row < height && col < wd)
{
int hidx = IMUL(row, wd) + col;
int scol = col << 2;
int sidx = IMUL(row, ws) + scol;
int v[4] = {0, 0, 0, 0};
if(row > 0 && row < height -1)
{
#pragma unroll
for(int i = 0; i < 4 ; ++i, ++scol)
{
float4 temp = tex1Dfetch(texDataF4, sidx +i);
v[i] = (scol < ws -1 && scol > 0 && temp.x!=0) ? 1 : 0;
}
}
hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);
}
}
void ProgramCU::InitHistogram(CuTexImage* key, CuTexImage* hist)
{
int ws = key->GetImgWidth(), hs = key->GetImgHeight();
int wd = hist->GetImgWidth(), hd = hist->GetImgHeight();
dim3 grid((wd + HIST_INIT_WIDTH - 1)/ HIST_INIT_WIDTH, hd);
dim3 block(HIST_INIT_WIDTH, 1);
key->BindTexture(texDataF4);
hipLaunchKernelGGL(( InitHist_Kernel), dim3(grid), dim3(block), 0, 0, (int4*) hist->_cuData, ws, wd, hd);
//CheckHistInit(key, hist);
}
void __global__ ReduceHist_Kernel(int4* d_hist, int ws, int wd, int height)
{
int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;
int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(row < height && col < wd)
{
int hidx = IMUL(row, wd) + col;
int scol = col << 2;
int sidx = IMUL(row, ws) + scol;
int v[4] = {0, 0, 0, 0};
#pragma unroll
for(int i = 0; i < 4 && scol < ws; ++i, ++scol)
{
int4 temp = tex1Dfetch(texDataI4, sidx + i);
v[i] = temp.x + temp.y + temp.z + temp.w;
}
d_hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);
}
}
void ProgramCU::ReduceHistogram(CuTexImage*hist1, CuTexImage* hist2)
{
int ws = hist1->GetImgWidth(), hs = hist1->GetImgHeight();
int wd = hist2->GetImgWidth(), hd = hist2->GetImgHeight();
int temp = (int)floor(logf(float(wd * 2/ 3)) / logf(2.0f));
const int wi = min(7, max(temp , 0));
hist1->BindTexture(texDataI4);
const int BW = 1 << wi, BH = 1 << (7 - wi);
dim3 grid((wd + BW - 1)/ BW, (hd + BH -1) / BH);
dim3 block(BW, BH);
hipLaunchKernelGGL(( ReduceHist_Kernel), dim3(grid), dim3(block), 0, 0, (int4*)hist2->_cuData, ws, wd, hd);
}
#define LISTGEN_BLOCK_DIM 128
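// ListGen_Kernel: one descent step through the histogram pyramid; each (x, y, offset) entry moves to
// the child column (4x finer in x) whose cumulative count contains its offset.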
void __global__ ListGen_Kernel(int4* d_list, int width)
{
int idx1 = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int4 pos = tex1Dfetch(texDataList, idx1);
int idx2 = IMUL(pos.y, width) + pos.x;
int4 temp = tex1Dfetch(texDataI4, idx2);
int sum1 = temp.x + temp.y;
int sum2 = sum1 + temp.z;
pos.x <<= 2;
if(pos.z >= sum2)
{
pos.x += 3;
pos.z -= sum2;
}else if(pos.z >= sum1)
{
pos.x += 2;
pos.z -= sum1;
}else if(pos.z >= temp.x)
{
pos.x += 1;
pos.z -= temp.x;
}
d_list[idx1] = pos;
}
//input list (x, y) (x, y) ....
void ProgramCU::GenerateList(CuTexImage* list, CuTexImage* hist)
{
int len = list->GetImgWidth();
list->BindTexture(texDataList);
hist->BindTexture(texDataI4);
dim3 grid((len + LISTGEN_BLOCK_DIM -1) /LISTGEN_BLOCK_DIM);
dim3 block(LISTGEN_BLOCK_DIM);
hipLaunchKernelGGL(( ListGen_Kernel), dim3(grid), dim3(block), 0, 0, (int4*) list->_cuData, hist->GetImgWidth());
}
void __global__ ComputeOrientation_Kernel(float4* d_list,
int list_len,
int width, int height,
float sigma, float sigma_step,
float gaussian_factor, float sample_factor,
int num_orientation,
int existing_keypoint,
int subpixel,
int keepsign)
{
const float ten_degree_per_radius = 5.7295779513082320876798154814105;
const float radius_per_ten_degrees = 1.0 / 5.7295779513082320876798154814105;
int idx = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
if(idx >= list_len) return;
float4 key;
if(existing_keypoint)
{
key = tex1Dfetch(texDataF4, idx);
}else
{
int4 ikey = tex1Dfetch(texDataList, idx);
key.x = ikey.x + 0.5f;
key.y = ikey.y + 0.5f;
key.z = sigma;
if(subpixel || keepsign)
{
float4 offset = tex1Dfetch(texDataF4, IMUL(width, ikey.y) + ikey.x);
if(subpixel)
{
key.x += offset.y;
key.y += offset.z;
key.z *= pow(sigma_step, offset.w);
}
if(keepsign) key.z *= offset.x;
}
}
if(num_orientation == 0)
{
key.w = 0;
d_list[idx] = key;
return;
}
float vote[37];
float gsigma = key.z * gaussian_factor;
float win = fabs(key.z) * sample_factor;
float dist_threshold = win * win + 0.5;
float factor = -0.5f / (gsigma * gsigma);
float xmin = max(1.5f, floor(key.x - win) + 0.5f);
float ymin = max(1.5f, floor(key.y - win) + 0.5f);
float xmax = min(width - 1.5f, floor(key.x + win) + 0.5f);
float ymax = min(height -1.5f, floor(key.y + win) + 0.5f);
#pragma unroll
for(int i = 0; i < 36; ++i) vote[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float dx = x - key.x;
float dy = y - key.y;
float sq_dist = dx * dx + dy * dy;
if(sq_dist >= dist_threshold) continue;
float2 got = tex2D(texDataF2, x, y);
float weight = got.x * exp(sq_dist * factor);
float fidx = floor(got.y * ten_degree_per_radius);
int oidx = fidx;
if(oidx < 0) oidx += 36;
vote[oidx] += weight;
}
}
//filter the vote
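// Six passes of a circular 3-tap box filter over the 36 orientation bins smooth the histogram
// before peak detection.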
const float one_third = 1.0 /3.0;
#pragma unroll
for(int i = 0; i < 6; ++i)
{
vote[36] = vote[0];
float pre = vote[35];
#pragma unroll
for(int j = 0; j < 36; ++j)
{
float temp = one_third * (pre + vote[j] + vote[j + 1]);
pre = vote[j]; vote[j] = temp;
}
}
vote[36] = vote[0];
if(num_orientation == 1 || existing_keypoint)
{
int index_max = 0;
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i)
{
index_max = vote[i] > max_vote? i : index_max;
max_vote = max(max_vote, vote[i]);
}
float pre = vote[index_max == 0? 35 : index_max -1];
float next = vote[index_max + 1];
float weight = max_vote;
float off = 0.5f * FDIV(next - pre, weight + weight - next - pre);
key.w = radius_per_ten_degrees * (index_max + 0.5f + off);
d_list[idx] = key;
}else
{
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i) max_vote = max(max_vote, vote[i]);
float vote_threshold = max_vote * 0.8f;
float pre = vote[35];
float max_rot[2], max_vot[2] = {0, 0};
int ocount = 0;
#pragma unroll
for(int i =0; i < 36; ++i)
{
float next = vote[i + 1];
if(vote[i] > vote_threshold && vote[i] > pre && vote[i] > next)
{
float di = 0.5f * FDIV(next - pre, vote[i] + vote[i] - next - pre);
float rot = i + di + 0.5f;
float weight = vote[i];
///
if(weight > max_vot[1])
{
if(weight > max_vot[0])
{
max_vot[1] = max_vot[0];
max_rot[1] = max_rot[0];
max_vot[0] = weight;
max_rot[0] = rot;
}
else
{
max_vot[1] = weight;
max_rot[1] = rot;
}
ocount ++;
}
}
pre = vote[i];
}
float fr1 = max_rot[0] / 36.0f;
if(fr1 < 0) fr1 += 1.0f;
unsigned short us1 = ocount == 0? 65535 : ((unsigned short )floor(fr1 * 65535.0f));
unsigned short us2 = 65535;
if(ocount > 1)
{
float fr2 = max_rot[1] / 36.0f;
if(fr2 < 0) fr2 += 1.0f;
us2 = (unsigned short ) floor(fr2 * 65535.0f);
}
unsigned int uspack = (us2 << 16) | us1;
key.w = __int_as_float(uspack);
d_list[idx] = key;
}
}
void ProgramCU::ComputeOrientation(CuTexImage* list, CuTexImage* got, CuTexImage*key,
float sigma, float sigma_step, int existing_keypoint)
{
int len = list->GetImgWidth();
if(len <= 0) return;
int width = got->GetImgWidth(), height = got->GetImgHeight();
if(existing_keypoint)
{
list->BindTexture(texDataF4);
}else
{
list->BindTexture(texDataList);
if(GlobalUtil::_SubpixelLocalization) key->BindTexture(texDataF4);
}
got->BindTexture2D(texDataF2);
const int block_width = 64;
dim3 grid((len + block_width -1) / block_width);
dim3 block(block_width);
hipLaunchKernelGGL(( ComputeOrientation_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) list->_cuData,
len, width, height, sigma, sigma_step,
GlobalUtil::_OrientationGaussianFactor,
GlobalUtil::_OrientationGaussianFactor * GlobalUtil::_OrientationWindowFactor,
GlobalUtil::_FixedOrientation? 0 : GlobalUtil::_MaxOrientation,
existing_keypoint, GlobalUtil::_SubpixelLocalization, GlobalUtil::_KeepExtremumSign);
ProgramCU::CheckErrorCUDA("ComputeOrientation");
}
template <bool DYNAMIC_INDEXING> void __global__ ComputeDescriptor_Kernel(float4* d_des, int num,
int width, int height, float window_factor)
{
const float rpi = 4.0/ 3.14159265358979323846;
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int fidx = idx >> 4;
if(fidx >= num) return;
float4 key = tex1Dfetch(texDataF4, fidx);
int bidx = idx& 0xf, ix = bidx & 0x3, iy = bidx >> 2;
float spt = fabs(key.z * window_factor);
float s, c; __sincosf(key.w, &s, &c);
float anglef = key.w > 3.14159265358979323846? key.w - (2.0 * 3.14159265358979323846) : key.w ;
float cspt = c * spt, sspt = s * spt;
float crspt = c / spt, srspt = s / spt;
float2 offsetpt, pt;
float xmin, ymin, xmax, ymax, bsz;
offsetpt.x = ix - 1.5f;
offsetpt.y = iy - 1.5f;
pt.x = cspt * offsetpt.x - sspt * offsetpt.y + key.x;
pt.y = cspt * offsetpt.y + sspt * offsetpt.x + key.y;
bsz = fabs(cspt) + fabs(sspt);
xmin = max(1.5f, floor(pt.x - bsz) + 0.5f);
ymin = max(1.5f, floor(pt.y - bsz) + 0.5f);
xmax = min(width - 1.5f, floor(pt.x + bsz) + 0.5f);
ymax = min(height - 1.5f, floor(pt.y + bsz) + 0.5f);
float des[9];
#pragma unroll
for(int i =0; i < 9; ++i) des[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float dx = x - pt.x;
float dy = y - pt.y;
float nx = crspt * dx + srspt * dy;
float ny = crspt * dy - srspt * dx;
float nxn = fabs(nx);
float nyn = fabs(ny);
if(nxn < 1.0f && nyn < 1.0f)
{
float2 cc = tex2D(texDataF2, x, y);
float dnx = nx + offsetpt.x;
float dny = ny + offsetpt.y;
float ww = exp(-0.125f * (dnx * dnx + dny * dny));
float wx = 1.0 - nxn;
float wy = 1.0 - nyn;
float weight = ww * wx * wy * cc.x;
float theta = (anglef - cc.y) * rpi;
if(theta < 0) theta += 8.0f;
float fo = floor(theta);
int fidx = fo;
float weight1 = fo + 1.0f - theta;
float weight2 = theta - fo;
if(DYNAMIC_INDEXING)
{
des[fidx] += (weight1 * weight);
des[fidx + 1] += (weight2 * weight);
//this dynamic indexing part might be slow
}else
{
#pragma unroll
for(int k = 0; k < 8; ++k)
{
if(k == fidx)
{
des[k] += (weight1 * weight);
des[k+1] += (weight2 * weight);
}
}
}
}
}
}
des[0] += des[8];
int didx = idx << 1;
d_des[didx] = make_float4(des[0], des[1], des[2], des[3]);
d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]);
}
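// NormalizeDescriptor_Kernel: normalize the 128-float descriptor to unit length, clamp each
// component at 0.2, then renormalize (standard SIFT illumination handling).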
void __global__ NormalizeDescriptor_Kernel(float4* d_des, int num)
{
float4 temp[32];
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
int sidx = idx << 5;
float norm1 = 0, norm2 = 0;
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i] = tex1Dfetch(texDataF4, sidx +i);
norm1 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y +
temp[i].z * temp[i].z + temp[i].w * temp[i].w);
}
norm1 = rsqrt(norm1);
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i].x = min(0.2f, temp[i].x * norm1);
temp[i].y = min(0.2f, temp[i].y * norm1);
temp[i].z = min(0.2f, temp[i].z * norm1);
temp[i].w = min(0.2f, temp[i].w * norm1);
norm2 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y +
temp[i].z * temp[i].z + temp[i].w * temp[i].w);
}
norm2 = rsqrt(norm2);
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i].x *= norm2; temp[i].y *= norm2;
temp[i].z *= norm2; temp[i].w *= norm2;
d_des[sidx + i] = temp[i];
}
}
void ProgramCU::ComputeDescriptor(CuTexImage*list, CuTexImage* got, CuTexImage* dtex)
{
int num = list->GetImgWidth();
int width = got->GetImgWidth();
int height = got->GetImgHeight();
dtex->InitTexture(num*128, 1, 1);
got->BindTexture2D(texDataF2);
list->BindTexture(texDataF4);
int block_width = 64;
dim3 grid((num * 16 + block_width -1) / block_width);
dim3 block(block_width);
if(GlobalUtil::_UseDynamicIndexing)
hipLaunchKernelGGL(( ComputeDescriptor_Kernel<true>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
hipLaunchKernelGGL(( ComputeDescriptor_Kernel<false>), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
if(GlobalUtil::_NormalizedSIFT)
{
dtex->BindTexture(texDataF4);
const int block_width = 32;
dim3 grid((num + block_width -1) / block_width);
dim3 block(block_width);
hipLaunchKernelGGL(( NormalizeDescriptor_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) dtex->_cuData, num);
}
CheckErrorCUDA("ComputeDescriptor");
}
//////////////////////////////////////////////////////
void ProgramCU::FinishCUDA()
{
hipDeviceSynchronize();
}
void ProgramCU::CheckErrorCUDA(const char* location)
{
hipError_t e = hipGetLastError();
if(e)
{
if(location) std::cerr << location << ":\t";
std::cerr << hipGetErrorString(e) << endl;
}
}
void __global__ ConvertDOG_Kernel(float* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float v = tex1Dfetch(texData, index);
d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)?
0.5 : saturate(0.5+20.0*v);
}
}
///
void ProgramCU::DisplayConvertDOG(CuTexImage* dog, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = dog->GetImgWidth(), height = dog ->GetImgHeight();
dog->BindTexture(texData);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
hipLaunchKernelGGL(( ConvertDOG_Kernel), dim3(grid), dim3(block), 0, 0, (float*) out->_cuData, width, height);
ProgramCU::CheckErrorCUDA("DisplayConvertDOG");
}
void __global__ ConvertGRD_Kernel(float* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float v = tex1Dfetch(texData, index << 1);
d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)?
0 : saturate(5 * v);
}
}
void ProgramCU::DisplayConvertGRD(CuTexImage* got, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = got->GetImgWidth(), height = got ->GetImgHeight();
got->BindTexture(texData);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
hipLaunchKernelGGL(( ConvertGRD_Kernel), dim3(grid), dim3(block), 0, 0, (float*) out->_cuData, width, height);
ProgramCU::CheckErrorCUDA("DisplayConvertGRD");
}
void __global__ ConvertKEY_Kernel(float4* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float4 keyv = tex1Dfetch(texDataF4, index);
int is_key = (keyv.x == 1.0f || keyv.x == -1.0f);
int inside = col > 0 && row > 0 && row < height -1 && col < width - 1;
float v = inside? saturate(0.5 + 20 * tex1Dfetch(texData, index)) : 0.5;
d_result[index] = is_key && inside ?
(keyv.x > 0? make_float4(1.0f, 0, 0, 1.0f) : make_float4(0.0f, 1.0f, 0.0f, 1.0f)):
make_float4(v, v, v, 1.0f) ;
}
}
void ProgramCU::DisplayConvertKEY(CuTexImage* key, CuTexImage* dog, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = key->GetImgWidth(), height = key ->GetImgHeight();
dog->BindTexture(texData);
key->BindTexture(texDataF4);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
hipLaunchKernelGGL(( ConvertKEY_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) out->_cuData, width, height);
}
void __global__ DisplayKeyPoint_Kernel(float4 * d_result, int num)
{
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
float4 v = tex1Dfetch(texDataF4, idx);
d_result[idx] = make_float4(v.x, v.y, 0, 1.0f);
}
void ProgramCU::DisplayKeyPoint(CuTexImage* ftex, CuTexImage* out)
{
int num = ftex->GetImgWidth();
int block_width = 64;
dim3 grid((num + block_width -1) /block_width);
dim3 block(block_width);
ftex->BindTexture(texDataF4);
hipLaunchKernelGGL(( DisplayKeyPoint_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) out->_cuData, num);
ProgramCU::CheckErrorCUDA("DisplayKeyPoint");
}
void __global__ DisplayKeyBox_Kernel(float4* d_result, int num)
{
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
int kidx = idx / 10, vidx = idx - IMUL(kidx , 10);
float4 v = tex1Dfetch(texDataF4, kidx);
float sz = fabs(v.z * 3.0f);
///////////////////////
float s, c; __sincosf(v.w, &s, &c);
///////////////////////
float dx = vidx == 0? 0 : ((vidx <= 4 || vidx >= 9)? sz : -sz);
float dy = vidx <= 1? 0 : ((vidx <= 2 || vidx >= 7)? -sz : sz);
float4 pos;
pos.x = v.x + c * dx - s * dy;
pos.y = v.y + c * dy + s * dx;
pos.z = 0; pos.w = 1.0f;
d_result[idx] = pos;
}
void ProgramCU::DisplayKeyBox(CuTexImage* ftex, CuTexImage* out)
{
int len = ftex->GetImgWidth();
int block_width = 32;
dim3 grid((len * 10 + block_width -1) / block_width);
dim3 block(block_width);
ftex->BindTexture(texDataF4);
hipLaunchKernelGGL(( DisplayKeyBox_Kernel), dim3(grid), dim3(block), 0, 0, (float4*) out->_cuData, len * 10);
}
///////////////////////////////////////////////////////////////////
inline void CuTexImage:: BindTexture(textureReference& texRef)
{
hipBindTexture(NULL, &texRef, _cuData, &texRef.channelDesc, _numBytes);
}
inline void CuTexImage::BindTexture2D(textureReference& texRef)
{
hipChannelFormatDesc desc;
hipGetChannelDesc(&desc, _cuData2D);
hipBindTextureToArray(&texRef, _cuData2D, &desc);
}
int ProgramCU::IsCudaSupported()
{
int deviceCount;
hipGetDeviceCount(&deviceCount);
return deviceCount;
}
////////////////////////////////////////////////////////////////////////////////////////
// siftmatch functions
//////////////////////////////////////////////////////////////////////////////////////////
#define MULT_TBLOCK_DIMX 128
#define MULT_TBLOCK_DIMY 1
#define MULT_BLOCK_DIMX (MULT_TBLOCK_DIMX)
#define MULT_BLOCK_DIMY (8 * MULT_TBLOCK_DIMY)
texture<uint4, 1, hipReadModeElementType> texDes1;
texture<uint4, 1, hipReadModeElementType> texDes2;
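// MultiplyDescriptor_Kernel: each thread takes one descriptor of set 2 and accumulates integer dot
// products against MULT_BLOCK_DIMY descriptors of set 1 cached in shared memory (17-int row stride
// to avoid bank conflicts); when d_temp is given, the best and second-best scores per column are
// also recorded for the later column-wise matching pass.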
void __global__ MultiplyDescriptor_Kernel(int* d_result, int num1, int num2, int3* d_temp)
{
int idx01 = (blockIdx.y * MULT_BLOCK_DIMY), idx02 = (blockIdx.x * MULT_BLOCK_DIMX);
int idx1 = idx01 + threadIdx.y, idx2 = idx02 + threadIdx.x;
__shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];
int read_idx1 = idx01 * 8 + threadIdx.x, read_idx2 = idx2 * 8;
int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2;
int cache_idx1 = IMUL(row4, 17) + (col4 << 2);
///////////////////////////////////////////////////////////////
//Load feature descriptors
///////////////////////////////////////////////////////////////
#if MULT_BLOCK_DIMY == 16
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w;
#elif MULT_BLOCK_DIMY == 8
if(threadIdx.x < 64)
{
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w;
}
#else
#error
#endif
__syncthreads();
///
if(idx2 >= num2) return;
///////////////////////////////////////////////////////////////////////////
//compare descriptors
int results[MULT_BLOCK_DIMY];
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i) results[i] = 0;
#pragma unroll
for(int i = 0; i < 8; ++i)
{
uint4 v = tex1Dfetch(texDes2, read_idx2 + i);
unsigned char* p2 = (unsigned char*)(&v);
#pragma unroll
for(int k = 0; k < MULT_BLOCK_DIMY; ++k)
{
unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4));
results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1])
+ IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3])
+ IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5])
+ IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7])
+ IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9])
+ IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11])
+ IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13])
+ IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15]));
}
}
int dst_idx = IMUL(idx1, num2) + idx2;
if(d_temp)
{
int3 cmp_result = make_int3(0, -1, 0);
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
cmp_result = results[i] > cmp_result.x?
make_int3(results[i], idx1 + i, cmp_result.x) :
make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i]));
d_result[dst_idx + IMUL(i, num2)] = results[i];
}
}
d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result;
}else
{
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = results[i];
}
}
}
void ProgramCU::MultiplyDescriptor(CuTexImage* des1, CuTexImage* des2, CuTexImage* texDot, CuTexImage* texCRT)
{
int num1 = des1->GetImgWidth() / 8;
int num2 = des2->GetImgWidth() / 8;
dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX,
(num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);
dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);
texDot->InitTexture( num2,num1);
if(texCRT) texCRT->InitTexture(num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 32);
des1->BindTexture(texDes1);
des2->BindTexture(texDes2);
hipLaunchKernelGGL(( MultiplyDescriptor_Kernel), dim3(grid), dim3(block), 0, 0, (int*)texDot->_cuData, num1, num2,
(texCRT? (int3*)texCRT->_cuData : NULL));
ProgramCU::CheckErrorCUDA("MultiplyDescriptor");
}
texture<float, 1, hipReadModeElementType> texLoc1;
texture<float2, 1, hipReadModeElementType> texLoc2;
struct Matrix33{float mat[3][3];};
void __global__ MultiplyDescriptorG_Kernel(int* d_result, int num1, int num2, int3* d_temp,
Matrix33 H, float hdistmax, Matrix33 F, float fdistmax)
{
int idx01 = (blockIdx.y * MULT_BLOCK_DIMY);
int idx02 = (blockIdx.x * MULT_BLOCK_DIMX);
int idx1 = idx01 + threadIdx.y;
int idx2 = idx02 + threadIdx.x;
__shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];
__shared__ float loc1[MULT_BLOCK_DIMY * 2];
int read_idx1 = idx01 * 8 + threadIdx.x ;
int read_idx2 = idx2 * 8;
int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2;
int cache_idx1 = IMUL(row4, 17) + (col4 << 2);
#if MULT_BLOCK_DIMY == 16
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x;
data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z;
data1[cache_idx1+3] = v.w;
#elif MULT_BLOCK_DIMY == 8
if(threadIdx.x < 64)
{
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x;
data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z;
data1[cache_idx1+3] = v.w;
}
#else
#error
#endif
__syncthreads();
if(threadIdx.x < MULT_BLOCK_DIMY * 2)
{
loc1[threadIdx.x] = tex1Dfetch(texLoc1, 2 * idx01 + threadIdx.x);
}
__syncthreads();
if(idx2 >= num2) return;
int results[MULT_BLOCK_DIMY];
/////////////////////////////////////////////////////////////////////////////////////////////
//geometric verification
/////////////////////////////////////////////////////////////////////////////////////////////
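// Geometric pre-filter: project loc1 through the homography H and require the reprojection error to
// stay below hdistmax, then check the Sampson error against the fundamental matrix F; pairs that
// fail keep a large negative score so they cannot become matches.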
int good_count = 0;
float2 loc2 = tex1Dfetch(texLoc2, idx2);
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
float* loci = loc1 + i * 2;
float locx = loci[0], locy = loci[1];
//homography
float x[3], diff[2];
x[0] = H.mat[0][0] * locx + H.mat[0][1] * locy + H.mat[0][2];
x[1] = H.mat[1][0] * locx + H.mat[1][1] * locy + H.mat[1][2];
x[2] = H.mat[2][0] * locx + H.mat[2][1] * locy + H.mat[2][2];
diff[0] = fabs(FDIV(x[0], x[2]) - loc2.x);
diff[1] = fabs(FDIV(x[1], x[2]) - loc2.y);
if(diff[0] < hdistmax && diff[1] < hdistmax)
{
//check fundamental matrix
float fx1[3], ftx2[3], x2fx1, se;
fx1[0] = F.mat[0][0] * locx + F.mat[0][1] * locy + F.mat[0][2];
fx1[1] = F.mat[1][0] * locx + F.mat[1][1] * locy + F.mat[1][2];
//fx1[2] = F.mat[2][0] * locx + F.mat[2][1] * locy + F.mat[2][2];
ftx2[0] = F.mat[0][0] * locx + F.mat[1][0] * locy + F.mat[2][0];
ftx2[1] = F.mat[0][1] * locx + F.mat[1][1] * locy + F.mat[2][1];
ftx2[2] = F.mat[0][2] * locx + F.mat[1][2] * locy + F.mat[2][2];
x2fx1 = locx * ftx2[0] + locy * ftx2[1] + ftx2[2];
se = FDIV(x2fx1 * x2fx1, fx1[0] * fx1[0] + fx1[1] * fx1[1] + ftx2[0] * ftx2[0] + ftx2[1] * ftx2[1]);
results[i] = se < fdistmax? 0: -262144;
}else
{
results[i] = -262144;
}
}else
{
results[i] = -262144;
}
good_count += (results[i] >=0);
}
/////////////////////////////////////////////////////////////////////////////////////////////
///compare feature descriptors anyway
/////////////////////////////////////////////////////////////////////////////////////////////
if(good_count > 0)
{
#pragma unroll
for(int i = 0; i < 8; ++i)
{
uint4 v = tex1Dfetch(texDes2, read_idx2 + i);
unsigned char* p2 = (unsigned char*)(&v);
#pragma unroll
for(int k = 0; k < MULT_BLOCK_DIMY; ++k)
{
unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4));
results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1])
+ IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3])
+ IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5])
+ IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7])
+ IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9])
+ IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11])
+ IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13])
+ IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15]));
}
}
}
int dst_idx = IMUL(idx1, num2) + idx2;
if(d_temp)
{
int3 cmp_result = make_int3(0, -1, 0);
#pragma unroll
for(int i= 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
cmp_result = results[i] > cmp_result.x?
make_int3(results[i], idx1 + i, cmp_result.x) :
make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i]));
d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0);
}else
{
break;
}
}
d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result;
}else
{
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0);
else break;
}
}
}
void ProgramCU::MultiplyDescriptorG(CuTexImage* des1, CuTexImage* des2,
CuTexImage* loc1, CuTexImage* loc2, CuTexImage* texDot, CuTexImage* texCRT,
float H[3][3], float hdistmax, float F[3][3], float fdistmax)
{
int num1 = des1->GetImgWidth() / 8;
int num2 = des2->GetImgWidth() / 8;
Matrix33 MatF, MatH;
//copy the matrix
memcpy(MatF.mat, F, 9 * sizeof(float));
memcpy(MatH.mat, H, 9 * sizeof(float));
//thread blocks
dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX,
(num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);
dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);
//intermediate results
texDot->InitTexture( num2,num1);
if(texCRT) texCRT->InitTexture( num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 3);
loc1->BindTexture(texLoc1);
loc2->BindTexture(texLoc2);
des1->BindTexture(texDes1);
des2->BindTexture(texDes2);
hipLaunchKernelGGL(( MultiplyDescriptorG_Kernel), dim3(grid), dim3(block), 0, 0, (int*)texDot->_cuData, num1, num2,
(texCRT? (int3*)texCRT->_cuData : NULL),
MatH, hdistmax, MatF, fdistmax);
}
texture<int, 1, hipReadModeElementType> texDOT;
#define ROWMATCH_BLOCK_WIDTH 32
#define ROWMATCH_BLOCK_HEIGHT 1
void __global__ RowMatch_Kernel(int*d_dot, int* d_result, int num2, float distmax, float ratiomax)
{
#if ROWMATCH_BLOCK_HEIGHT == 1
__shared__ int dotmax[ROWMATCH_BLOCK_WIDTH];
__shared__ int dotnxt[ROWMATCH_BLOCK_WIDTH];
__shared__ int dotidx[ROWMATCH_BLOCK_WIDTH];
int row = blockIdx.y;
#else
__shared__ int x_dotmax[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
__shared__ int x_dotnxt[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
__shared__ int x_dotidx[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
int* dotmax = x_dotmax[threadIdx.y];
int* dotnxt = x_dotnxt[threadIdx.y];
int* dotidx = x_dotidx[threadIdx.y];
int row = IMUL(blockIdx.y, ROWMATCH_BLOCK_HEIGHT) + threadIdx.y;
#endif
int base_address = IMUL(row , num2);
int t_dotmax = 0, t_dotnxt = 0, t_dotidx = -1;
for(int i = 0; i < num2; i += ROWMATCH_BLOCK_WIDTH)
{
if(threadIdx.x + i < num2)
{
int v = tex1Dfetch(texDOT, base_address + threadIdx.x + i);//d_dot[base_address + threadIdx.x + i];//
bool test = v > t_dotmax;
t_dotnxt = test? t_dotmax : max(t_dotnxt, v);
t_dotidx = test? (threadIdx.x + i) : t_dotidx;
t_dotmax = test? v: t_dotmax;
}
__syncthreads();
}
dotmax[threadIdx.x] = t_dotmax;
dotnxt[threadIdx.x] = t_dotnxt;
dotidx[threadIdx.x] = t_dotidx;
__syncthreads();
#pragma unroll
for(int step = ROWMATCH_BLOCK_WIDTH/2; step >0; step /= 2)
{
if(threadIdx.x < step)
{
int v1 = dotmax[threadIdx.x], v2 = dotmax[threadIdx.x + step];
bool test = v2 > v1;
dotnxt[threadIdx.x] = test? max(v1, dotnxt[threadIdx.x + step]) :max(dotnxt[threadIdx.x], v2);
dotidx[threadIdx.x] = test? dotidx[threadIdx.x + step] : dotidx[threadIdx.x];
dotmax[threadIdx.x] = test? v2 : v1;
}
__syncthreads();
}
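// 0.000003814697265625f = 1/262144 = 1/(512*512); this appears to assume descriptors quantized as
// bytes scaled by 512, so the integer dot product maps to [0,1] before acos gives an angular distance.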
if(threadIdx.x == 0)
{
float dist = acos(min(dotmax[0] * 0.000003814697265625f, 1.0));
float distn = acos(min(dotnxt[0] * 0.000003814697265625f, 1.0));
//float ratio = dist / distn;
d_result[row] = (dist < distmax) && (dist < distn * ratiomax) ? dotidx[0] : -1;//? : -1;
}
}
void ProgramCU::GetRowMatch(CuTexImage* texDot, CuTexImage* texMatch, float distmax, float ratiomax)
{
int num1 = texDot->GetImgHeight();
int num2 = texDot->GetImgWidth();
dim3 grid(1, num1/ROWMATCH_BLOCK_HEIGHT);
dim3 block(ROWMATCH_BLOCK_WIDTH, ROWMATCH_BLOCK_HEIGHT);
texDot->BindTexture(texDOT);
hipLaunchKernelGGL(( RowMatch_Kernel), dim3(grid), dim3(block), 0, 0, (int*)texDot->_cuData,
(int*)texMatch->_cuData, num2, distmax, ratiomax);
}
#define COLMATCH_BLOCK_WIDTH 32
//texture<int3, 1, hipReadModeElementType> texCT;
void __global__ ColMatch_Kernel(int3*d_crt, int* d_result, int height, int num2, float distmax, float ratiomax)
{
int col = COLMATCH_BLOCK_WIDTH * blockIdx.x + threadIdx.x;
if(col >= num2) return;
int3 result = d_crt[col];//tex1Dfetch(texCT, col);
int read_idx = col + num2;
for(int i = 1; i < height; ++i, read_idx += num2)
{
int3 temp = d_crt[read_idx];//tex1Dfetch(texCT, read_idx);
result = result.x < temp.x?
make_int3(temp.x, temp.y, max(result.x, temp.z)) :
make_int3(result.x, result.y, max(result.z, temp.x));
}
float dist = acos(min(result.x * 0.000003814697265625f, 1.0));
float distn = acos(min(result.z * 0.000003814697265625f, 1.0));
//float ratio = dist / distn;
d_result[col] = (dist < distmax) && (dist < distn * ratiomax) ? result.y : -1;//? : -1;
}
void ProgramCU::GetColMatch(CuTexImage* texCRT, CuTexImage* texMatch, float distmax, float ratiomax)
{
int height = texCRT->GetImgHeight();
int num2 = texCRT->GetImgWidth();
//texCRT->BindTexture(texCT);
dim3 grid((num2 + COLMATCH_BLOCK_WIDTH -1) / COLMATCH_BLOCK_WIDTH);
dim3 block(COLMATCH_BLOCK_WIDTH);
hipLaunchKernelGGL(( ColMatch_Kernel), dim3(grid), dim3(block), 0, 0, (int3*)texCRT->_cuData, (int*) texMatch->_cuData, height, num2, distmax, ratiomax);
}
#endif
| 957b4c76270254d61058f7770416b671439d24f5.cu | ////////////////////////////////////////////////////////////////////////////
// File: ProgramCU.cu
// Author: Changchang Wu
// Description : implementation of ProgramCU and all CUDA kernels
//
// Copyright (c) 2007 University of North Carolina at Chapel Hill
// All Rights Reserved
//
// Permission to use, copy, modify and distribute this software and its
// documentation for educational, research and non-profit purposes, without
// fee, and without a written agreement is hereby granted, provided that the
// above copyright notice and the following paragraph appear in all copies.
//
// The University of North Carolina at Chapel Hill make no representations
// about the suitability of this software for any purpose. It is provided
// 'as is' without express or implied warranty.
//
// Please send BUG REPORTS to [email protected]
//
////////////////////////////////////////////////////////////////////////////
#if defined(CUDA_SIFTGPU_ENABLED)
#include "GL/glew.h"
#include <iostream>
#include <algorithm>
using namespace std;
#include "CuTexImage.h"
#include "ProgramCU.h"
#include "GlobalUtil.h"
//Standard block size
#define BLOCK_DIM 16
#define BLOCK_LOG_DIM 4
#define IMUL(X,Y) __mul24(X,Y)
//#define FDIV(X,Y) ((X)/(Y))
#define FDIV(X,Y) __fdividef(X,Y)
//filter kernel
#define KERNEL_MAX_WIDTH 33
//#define MAX_THREAD_PER_BLOCK 512 = 16 * 32
//////////////////////////////larger block gives better performance
#define FILTERV_TILE_WIDTH 16
#define FILTERV_TILE_HEIGHT 128
#define FILTERV_TBLK_HEIGHT 32
////////////////////////////
#define FILTERH_TILE_WIDTH 128
__device__ __constant__ float d_kernel[KERNEL_MAX_WIDTH];
texture<float, 1, cudaReadModeElementType> texData;
texture<float2, 2, cudaReadModeElementType> texDataF2;
texture<float4, 1, cudaReadModeElementType> texDataF4;
texture<int4, 1, cudaReadModeElementType> texDataI4;
texture<int4, 1, cudaReadModeElementType> texDataList;
//template<int i> __device__ float Conv(float *data) { return Conv<i-1>(data) + data[i]*d_kernel[i];}
//template<> __device__ float Conv<0>(float *data) { return data[0] * d_kernel[0]; }
//////////////////////////////////////////////////////////////
template<int FW> __global__ void FilterH( float* d_result, int width)
{
const int HALF_WIDTH = FW >> 1;
const int CACHE_WIDTH = FILTERH_TILE_WIDTH + FW -1;
const int CACHE_COUNT = 2 + (CACHE_WIDTH - 2)/ FILTERH_TILE_WIDTH;
__shared__ float data[CACHE_WIDTH];
const int bcol = IMUL(blockIdx.x, FILTERH_TILE_WIDTH);
const int col = bcol + threadIdx.x;
const int index_min = IMUL(blockIdx.y, width);
const int index_max = index_min + width - 1;
int src_index = index_min + bcol - HALF_WIDTH + threadIdx.x;
int cache_index = threadIdx.x;
float value = 0;
#pragma unroll
for(int j = 0; j < CACHE_COUNT; ++j)
{
if(cache_index < CACHE_WIDTH)
{
int fetch_index = src_index < index_min? index_min : (src_index > index_max ? index_max : src_index);
data[cache_index] = tex1Dfetch(texData,fetch_index);
src_index += FILTERH_TILE_WIDTH;
cache_index += FILTERH_TILE_WIDTH;
}
}
__syncthreads();
if(col >= width) return;
#pragma unroll
for(int i = 0; i < FW; ++i)
{
value += (data[threadIdx.x + i]* d_kernel[i]);
}
// value = Conv<FW-1>(data + threadIdx.x);
d_result[index_min + col] = value;
}
////////////////////////////////////////////////////////////////////
template<int FW> __global__ void FilterV(float* d_result, int width, int height)
{
const int HALF_WIDTH = FW >> 1;
const int CACHE_WIDTH = FW + FILTERV_TILE_HEIGHT - 1;
const int TEMP = CACHE_WIDTH & 0xf;
//add some extra space to avoid bank conflict
#if FILTERV_TILE_WIDTH == 16
//make the stride 16 * n +/- 1
const int EXTRA = (TEMP == 1 || TEMP == 0) ? 1 - TEMP : 15 - TEMP;
#elif FILTERV_TILE_WIDTH == 8
//make the stride 16 * n +/- 2
const int EXTRA = (TEMP == 2 || TEMP == 1 || TEMP == 0) ? 2 - TEMP : (TEMP == 15? 3 : 14 - TEMP);
#elif FILTERV_TILE_WIDTH == 4
//make the stride 16 * n +/- 4
const int EXTRA = (TEMP >=0 && TEMP <=4) ? 4 - TEMP : (TEMP > 12? 20 - TEMP : 12 - TEMP);
#else
#error
#endif
const int CACHE_TRUE_WIDTH = CACHE_WIDTH + EXTRA;
const int CACHE_COUNT = (CACHE_WIDTH + FILTERV_TBLK_HEIGHT - 1) / FILTERV_TBLK_HEIGHT;
const int WRITE_COUNT = (FILTERV_TILE_HEIGHT + FILTERV_TBLK_HEIGHT -1) / FILTERV_TBLK_HEIGHT;
__shared__ float data[CACHE_TRUE_WIDTH * FILTERV_TILE_WIDTH];
const int row_block_first = IMUL(blockIdx.y, FILTERV_TILE_HEIGHT);
const int col = IMUL(blockIdx.x, FILTERV_TILE_WIDTH) + threadIdx.x;
const int row_first = row_block_first - HALF_WIDTH;
const int data_index_max = IMUL(height - 1, width) + col;
const int cache_col_start = threadIdx.y;
const int cache_row_start = IMUL(threadIdx.x, CACHE_TRUE_WIDTH);
int cache_index = cache_col_start + cache_row_start;
int data_index = IMUL(row_first + cache_col_start, width) + col;
if(col < width)
{
#pragma unroll
for(int i = 0; i < CACHE_COUNT; ++i)
{
if(cache_col_start < CACHE_WIDTH - i * FILTERV_TBLK_HEIGHT)
{
int fetch_index = data_index < col ? col : (data_index > data_index_max? data_index_max : data_index);
data[cache_index + i * FILTERV_TBLK_HEIGHT] = tex1Dfetch(texData,fetch_index);
data_index += IMUL(FILTERV_TBLK_HEIGHT, width);
}
}
}
__syncthreads();
if(col >= width) return;
int row = row_block_first + threadIdx.y;
int index_start = cache_row_start + threadIdx.y;
#pragma unroll
for(int i = 0; i < WRITE_COUNT; ++i,
row += FILTERV_TBLK_HEIGHT, index_start += FILTERV_TBLK_HEIGHT)
{
if(row < height)
{
int index_dest = IMUL(row, width) + col;
float value = 0;
#pragma unroll
for(int i = 0; i < FW; ++i)
{
value += (data[index_start + i] * d_kernel[i]);
}
d_result[index_dest] = value;
}
}
}
template<int LOG_SCALE> __global__ void UpsampleKernel(float* d_result, int width)
{
const int SCALE = (1 << LOG_SCALE), SCALE_MASK = (SCALE - 1);
const float INV_SCALE = 1.0f / (float(SCALE));
int col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(col >= width) return;
int row = blockIdx.y >> LOG_SCALE;
int index = row * width + col;
int dst_row = blockIdx.y;
int dst_idx= (width * dst_row + col) * SCALE;
int helper = blockIdx.y & SCALE_MASK;
if (helper)
{
float v11 = tex1Dfetch(texData, index);
float v12 = tex1Dfetch(texData, index + 1);
index += width;
float v21 = tex1Dfetch(texData, index);
float v22 = tex1Dfetch(texData, index + 1);
float w1 = INV_SCALE * helper, w2 = 1.0 - w1;
float v1 = (v21 * w1 + w2 * v11);
float v2 = (v22 * w1 + w2 * v12);
d_result[dst_idx] = v1;
#pragma unroll
for(int i = 1; i < SCALE; ++i)
{
const float r2 = i * INV_SCALE;
const float r1 = 1.0f - r2;
d_result[dst_idx +i] = v1 * r1 + v2 * r2;
}
}else
{
float v1 = tex1Dfetch(texData, index);
float v2 = tex1Dfetch(texData, index + 1);
d_result[dst_idx] = v1;
#pragma unroll
for(int i = 1; i < SCALE; ++i)
{
const float r2 = i * INV_SCALE;
const float r1 = 1.0f - r2;
d_result[dst_idx +i] = v1 * r1 + v2 * r2;
}
}
}
////////////////////////////////////////////////////////////////////////////////////////
void ProgramCU::SampleImageU(CuTexImage *dst, CuTexImage *src, int log_scale)
{
int width = src->GetImgWidth(), height = src->GetImgHeight();
src->BindTexture(texData);
dim3 grid((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height << log_scale);
dim3 block(FILTERH_TILE_WIDTH);
switch(log_scale)
{
case 1 : UpsampleKernel<1> <<< grid, block>>> ((float*) dst->_cuData, width); break;
case 2 : UpsampleKernel<2> <<< grid, block>>> ((float*) dst->_cuData, width); break;
case 3 : UpsampleKernel<3> <<< grid, block>>> ((float*) dst->_cuData, width); break;
default: break;
}
}
template<int LOG_SCALE> __global__ void DownsampleKernel(float* d_result, int src_width, int dst_width)
{
const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(dst_col >= dst_width) return;
const int src_col = min((dst_col << LOG_SCALE), (src_width - 1));
const int dst_row = blockIdx.y;
const int src_row = blockIdx.y << LOG_SCALE;
const int src_idx = IMUL(src_row, src_width) + src_col;
const int dst_idx = IMUL(dst_width, dst_row) + dst_col;
d_result[dst_idx] = tex1Dfetch(texData, src_idx);
}
__global__ void DownsampleKernel(float* d_result, int src_width, int dst_width, const int log_scale)
{
const int dst_col = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
if(dst_col >= dst_width) return;
const int src_col = min((dst_col << log_scale), (src_width - 1));
const int dst_row = blockIdx.y;
const int src_row = blockIdx.y << log_scale;
const int src_idx = IMUL(src_row, src_width) + src_col;
const int dst_idx = IMUL(dst_width, dst_row) + dst_col;
d_result[dst_idx] = tex1Dfetch(texData, src_idx);
}
void ProgramCU::SampleImageD(CuTexImage *dst, CuTexImage *src, int log_scale)
{
int src_width = src->GetImgWidth(), dst_width = dst->GetImgWidth() ;
src->BindTexture(texData);
dim3 grid((dst_width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, dst->GetImgHeight());
dim3 block(FILTERH_TILE_WIDTH);
switch(log_scale)
{
case 1 : DownsampleKernel<1> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width); break;
case 2 : DownsampleKernel<2> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width); break;
case 3 : DownsampleKernel<3> <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width); break;
default: DownsampleKernel <<< grid, block>>> ((float*) dst->_cuData, src_width, dst_width, log_scale);
}
}
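// The two kernels below collapse a multi-channel image into a single channel:
// ChannelReduce_Kernel keeps every 4th float (the first channel of a 4-channel
// layout), while ChannelReduce_Convert_Kernel converts an RGBA texel to grayscale
// with the usual 0.299/0.587/0.114 luminance weights.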
__global__ void ChannelReduce_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
d_result[index] = tex1Dfetch(texData, index*4);
}
__global__ void ChannelReduce_Convert_Kernel(float* d_result)
{
int index = IMUL(blockIdx.x, FILTERH_TILE_WIDTH) + threadIdx.x;
float4 rgba = tex1Dfetch(texDataF4, index);
d_result[index] = 0.299f * rgba.x + 0.587f* rgba.y + 0.114f * rgba.z;
}
void ProgramCU::ReduceToSingleChannel(CuTexImage* dst, CuTexImage* src, int convert_rgb)
{
int width = src->GetImgWidth(), height = dst->GetImgHeight() ;
dim3 grid((width * height + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH);
dim3 block(FILTERH_TILE_WIDTH);
if(convert_rgb)
{
src->BindTexture(texDataF4);
ChannelReduce_Convert_Kernel<<<grid, block>>>((float*)dst->_cuData);
}else
{
src->BindTexture(texData);
ChannelReduce_Kernel<<<grid, block>>>((float*)dst->_cuData);
}
}
void ProgramCU::CreateFilterKernel(float sigma, float* kernel, int& width)
{
int i, sz = int( ceil( GlobalUtil::_FilterWidthFactor * sigma -0.5) ) ;//
width = 2*sz + 1;
//filter size truncation
if(width > KERNEL_MAX_WIDTH)
{
//std::cout<<"Filter truncated "<<width<<"->"<<KERNEL_MAX_WIDTH<<endl;
sz = KERNEL_MAX_WIDTH >> 1;
width =KERNEL_MAX_WIDTH;
}
float rv = 1.0f/(sigma*sigma), v, ksum =0;
// pre-compute filter
for( i = -sz ; i <= sz ; ++i)
{
kernel[i+sz] = v = exp(-0.5f * i * i *rv) ;
ksum += v;
}
//normalize the kernel
rv = 1.0f/ksum;
for(i = 0; i< width ;i++) kernel[i]*=rv;
}
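// Worked example (assuming the default _FilterWidthFactor of 4.0): for sigma = 1.6,
// sz = ceil(4.0 * 1.6 - 0.5) = 6, so width = 2*6 + 1 = 13 and FilterImage<13> is
// dispatched below. The kernel is a normalized 1D Gaussian applied separably
// (FilterH across rows, then FilterV down columns).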
template<int FW> void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf)
{
int width = src->GetImgWidth(), height = src->GetImgHeight();
//horizontal filtering
src->BindTexture(texData);
dim3 gridh((width + FILTERH_TILE_WIDTH - 1)/ FILTERH_TILE_WIDTH, height);
dim3 blockh(FILTERH_TILE_WIDTH);
FilterH<FW><<<gridh, blockh>>>((float*)buf->_cuData, width);
CheckErrorCUDA("FilterH");
///vertical filtering
buf->BindTexture(texData);
dim3 gridv((width + FILTERV_TILE_WIDTH - 1)/ FILTERV_TILE_WIDTH, (height + FILTERV_TILE_HEIGHT - 1)/FILTERV_TILE_HEIGHT);
dim3 blockv(FILTERV_TILE_WIDTH, FILTERV_TBLK_HEIGHT);
FilterV<FW><<<gridv, blockv>>>((float*)dst->_cuData, width, height);
CheckErrorCUDA("FilterV");
}
//////////////////////////////////////////////////////////////////////
// tested on 2048x1500 image, the time on pyramid construction is
// -pack cg version : 18ms
// -unpack cg version : 49 ms
// -cuda version: 28 ms
void ProgramCU::FilterImage(CuTexImage *dst, CuTexImage *src, CuTexImage* buf, float sigma)
{
float filter_kernel[KERNEL_MAX_WIDTH]; int width;
CreateFilterKernel(sigma, filter_kernel, width);
cudaMemcpyToSymbol(d_kernel, filter_kernel, width * sizeof(float), 0, cudaMemcpyHostToDevice);
switch(width)
{
case 5: FilterImage< 5>(dst, src, buf); break;
case 7: FilterImage< 7>(dst, src, buf); break;
case 9: FilterImage< 9>(dst, src, buf); break;
case 11: FilterImage<11>(dst, src, buf); break;
case 13: FilterImage<13>(dst, src, buf); break;
case 15: FilterImage<15>(dst, src, buf); break;
case 17: FilterImage<17>(dst, src, buf); break;
case 19: FilterImage<19>(dst, src, buf); break;
case 21: FilterImage<21>(dst, src, buf); break;
case 23: FilterImage<23>(dst, src, buf); break;
case 25: FilterImage<25>(dst, src, buf); break;
case 27: FilterImage<27>(dst, src, buf); break;
case 29: FilterImage<29>(dst, src, buf); break;
case 31: FilterImage<31>(dst, src, buf); break;
case 33: FilterImage<33>(dst, src, buf); break;
default: break;
}
}
#define DOG_BLOCK_DIMX 128
#define DOG_BLOCK_DIMY 1
#define DOG_BLOCK_LOG_DIMX 7
#define DOG_BLOCK_LOG_DIMY 0
texture<float, 1, cudaReadModeElementType> texC;
texture<float, 1, cudaReadModeElementType> texP;
texture<float, 1, cudaReadModeElementType> texN;
void __global__ ComputeDOG_Kernel(float* d_dog, float2* d_got, int width, int height)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if(col < width && row < height)
{
int index = IMUL(row, width) + col;
float vp = tex1Dfetch(texP, index);
float v = tex1Dfetch(texC, index);
d_dog[index] = v - vp;
float vxn = tex1Dfetch(texC, index + 1);
float vxp = tex1Dfetch(texC, index - 1);
float vyp = tex1Dfetch(texC, index - width);
float vyn = tex1Dfetch(texC, index + width);
float dx = vxn - vxp, dy = vyn - vyp;
float grd = 0.5f * sqrt(dx * dx + dy * dy);
float rot = (grd == 0.0f? 0.0f : atan2(dy, dx));
d_got[index] = make_float2(grd, rot);
}
}
void __global__ ComputeDOG_Kernel(float* d_dog, int width, int height)
{
int row = (blockIdx.y << DOG_BLOCK_LOG_DIMY) + threadIdx.y;
int col = (blockIdx.x << DOG_BLOCK_LOG_DIMX) + threadIdx.x;
if(col < width && row < height)
{
int index = IMUL(row, width) + col;
float vp = tex1Dfetch(texP, index);
float v = tex1Dfetch(texC, index);
d_dog[index] = v - vp;
}
}
void ProgramCU::ComputeDOG(CuTexImage* gus, CuTexImage* dog, CuTexImage* got)
{
int width = gus->GetImgWidth(), height = gus->GetImgHeight();
dim3 grid((width + DOG_BLOCK_DIMX - 1)/ DOG_BLOCK_DIMX, (height + DOG_BLOCK_DIMY - 1)/DOG_BLOCK_DIMY);
dim3 block(DOG_BLOCK_DIMX, DOG_BLOCK_DIMY);
gus->BindTexture(texC);
(gus -1)->BindTexture(texP);
if(got->_cuData)
ComputeDOG_Kernel<<<grid, block>>>((float*) dog->_cuData, (float2*) got->_cuData, width, height);
else
ComputeDOG_Kernel<<<grid, block>>>((float*) dog->_cuData, width, height);
}
#define KEY_BLOCK_LOG_DIMX 3
#define KEY_BLOCK_LOG_DIMY 3
#define KEY_BLOCK_DIMX (1<<KEY_BLOCK_LOG_DIMX)
#define KEY_BLOCK_DIMY (1<<KEY_BLOCK_LOG_DIMY)
//4/5, 3/2 -> 33
//4, 1 -> 45
//4, 0 -> 60
#define READ_CMP_DOG_DATA(datai, tex, idx) \
datai[0] = tex1Dfetch(tex, idx - 1);\
datai[1] = tex1Dfetch(tex, idx);\
datai[2] = tex1Dfetch(tex, idx + 1);\
if(v > nmax)\
{\
nmax = max(nmax, datai[0]);\
nmax = max(nmax, datai[1]);\
nmax = max(nmax, datai[2]);\
if(v < nmax) goto key_finish;\
}else\
{\
nmin = min(nmin, datai[0]);\
nmin = min(nmin, datai[1]);\
nmin = min(nmin, datai[2]);\
if(v > nmin) goto key_finish;\
}
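// READ_CMP_DOG_DATA loads three horizontally adjacent DOG samples into datai and folds
// them into the running neighborhood max/min; as soon as the center value v falls below
// that max (or rises above that min) it cannot be an extremum, so control jumps to
// key_finish and the remaining comparisons are skipped.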
void __global__ ComputeKEY_Kernel(float4* d_key, int width, int colmax, int rowmax,
float dog_threshold0, float dog_threshold, float edge_threshold, int subpixel_localization)
{
float data[3][3], v;
float datap[3][3], datan[3][3];
int row = (blockIdx.y << KEY_BLOCK_LOG_DIMY) + threadIdx.y + 1;
int col = (blockIdx.x << KEY_BLOCK_LOG_DIMX) + threadIdx.x + 1;
int index = IMUL(row, width) + col;
int idx[3] ={index - width, index, index + width};
int in_image =0;
float nmax, nmin, result = 0.0f;
float dx = 0, dy = 0, ds = 0;
bool offset_test_passed = true;
if(row < rowmax && col < colmax)
{
in_image = 1;
data[1][1] = v = tex1Dfetch(texC, idx[1]);
if(fabs(v) <= dog_threshold0) goto key_finish;
data[1][0] = tex1Dfetch(texC, idx[1] - 1);
data[1][2] = tex1Dfetch(texC, idx[1] + 1);
nmax = max(data[1][0], data[1][2]);
nmin = min(data[1][0], data[1][2]);
if(v <=nmax && v >= nmin) goto key_finish;
//if((v > nmax && v < 0 )|| (v < nmin && v > 0)) goto key_finish;
READ_CMP_DOG_DATA(data[0], texC, idx[0]);
READ_CMP_DOG_DATA(data[2], texC, idx[2]);
		//edge suppression
float vx2 = v * 2.0f;
float fxx = data[1][0] + data[1][2] - vx2;
float fyy = data[0][1] + data[2][1] - vx2;
float fxy = 0.25f * (data[2][2] + data[0][0] - data[2][0] - data[0][2]);
float temp1 = fxx * fyy - fxy * fxy;
float temp2 = (fxx + fyy) * (fxx + fyy);
if(temp1 <=0 || temp2 > edge_threshold * temp1) goto key_finish;
//read the previous level
READ_CMP_DOG_DATA(datap[0], texP, idx[0]);
READ_CMP_DOG_DATA(datap[1], texP, idx[1]);
READ_CMP_DOG_DATA(datap[2], texP, idx[2]);
//read the next level
READ_CMP_DOG_DATA(datan[0], texN, idx[0]);
READ_CMP_DOG_DATA(datan[1], texN, idx[1]);
READ_CMP_DOG_DATA(datan[2], texN, idx[2]);
if(subpixel_localization)
{
//subpixel localization
float fx = 0.5f * (data[1][2] - data[1][0]);
float fy = 0.5f * (data[2][1] - data[0][1]);
float fs = 0.5f * (datan[1][1] - datap[1][1]);
float fss = (datan[1][1] + datap[1][1] - vx2);
float fxs = 0.25f* (datan[1][2] + datap[1][0] - datan[1][0] - datap[1][2]);
float fys = 0.25f* (datan[2][1] + datap[0][1] - datan[0][1] - datap[2][1]);
//need to solve dx, dy, ds;
// |-fx| | fxx fxy fxs | |dx|
// |-fy| = | fxy fyy fys | * |dy|
// |-fs| | fxs fys fss | |ds|
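			// The 3x3 system above is solved by Gaussian elimination: each row is sign-flipped
			// so its leading entry is non-negative, the row with the largest pivot is swapped
			// into row 0, and partial pivoting on the second column guards the back-substitution
			// that produces ds, dy and finally dx.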
float4 A0 = fxx > 0? make_float4(fxx, fxy, fxs, -fx) : make_float4(-fxx, -fxy, -fxs, fx);
float4 A1 = fxy > 0? make_float4(fxy, fyy, fys, -fy) : make_float4(-fxy, -fyy, -fys, fy);
float4 A2 = fxs > 0? make_float4(fxs, fys, fss, -fs) : make_float4(-fxs, -fys, -fss, fs);
float maxa = max(max(A0.x, A1.x), A2.x);
if(maxa >= 1e-10)
{
if(maxa == A1.x)
{
float4 TEMP = A1; A1 = A0; A0 = TEMP;
}else if(maxa == A2.x)
{
float4 TEMP = A2; A2 = A0; A0 = TEMP;
}
A0.y /= A0.x; A0.z /= A0.x; A0.w/= A0.x;
A1.y -= A1.x * A0.y; A1.z -= A1.x * A0.z; A1.w -= A1.x * A0.w;
A2.y -= A2.x * A0.y; A2.z -= A2.x * A0.z; A2.w -= A2.x * A0.w;
if(abs(A2.y) > abs(A1.y))
{
float4 TEMP = A2; A2 = A1; A1 = TEMP;
}
if(abs(A1.y) >= 1e-10)
{
A1.z /= A1.y; A1.w /= A1.y;
A2.z -= A2.y * A1.z; A2.w -= A2.y * A1.w;
if(abs(A2.z) >= 1e-10)
{
ds = A2.w / A2.z;
dy = A1.w - ds * A1.z;
dx = A0.w - ds * A0.z - dy * A0.y;
offset_test_passed =
fabs(data[1][1] + 0.5f * (dx * fx + dy * fy + ds * fs)) > dog_threshold
&&fabs(ds) < 1.0f && fabs(dx) < 1.0f && fabs(dy) < 1.0f;
}
}
}
}
if(offset_test_passed) result = v > nmax ? 1.0 : -1.0;
}
key_finish:
if(in_image) d_key[index] = make_float4(result, dx, dy, ds);
}
void ProgramCU::ComputeKEY(CuTexImage* dog, CuTexImage* key, float Tdog, float Tedge)
{
int width = dog->GetImgWidth(), height = dog->GetImgHeight();
float Tdog1 = (GlobalUtil::_SubpixelLocalization? 0.8f : 1.0f) * Tdog;
CuTexImage* dogp = dog - 1;
CuTexImage* dogn = dog + 1;
dim3 grid((width - 1 + KEY_BLOCK_DIMX - 1)/ KEY_BLOCK_DIMX, (height - 1 + KEY_BLOCK_DIMY - 1)/KEY_BLOCK_DIMY);
dim3 block(KEY_BLOCK_DIMX, KEY_BLOCK_DIMY);
dogp->BindTexture(texP);
dog ->BindTexture(texC);
dogn->BindTexture(texN);
Tedge = (Tedge+1)*(Tedge+1)/Tedge;
ComputeKEY_Kernel<<<grid, block>>>((float4*) key->_cuData, width, width -1, height -1,
Tdog1, Tdog, Tedge, GlobalUtil::_SubpixelLocalization);
}
#define HIST_INIT_WIDTH 128
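// Keypoint listing uses a histogram pyramid: InitHist_Kernel marks each non-border
// pixel whose key value is non-zero, ReduceHist_Kernel folds groups of four counts
// into the next coarser level, and ListGen_Kernel later refines an (x, y, remainder)
// entry by one pyramid level, picking which of the four child counts the remainder
// falls into.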
void __global__ InitHist_Kernel(int4* hist, int ws, int wd, int height)
{
int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;
int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(row < height && col < wd)
{
int hidx = IMUL(row, wd) + col;
int scol = col << 2;
int sidx = IMUL(row, ws) + scol;
int v[4] = {0, 0, 0, 0};
if(row > 0 && row < height -1)
{
#pragma unroll
for(int i = 0; i < 4 ; ++i, ++scol)
{
float4 temp = tex1Dfetch(texDataF4, sidx +i);
v[i] = (scol < ws -1 && scol > 0 && temp.x!=0) ? 1 : 0;
}
}
hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);
}
}
void ProgramCU::InitHistogram(CuTexImage* key, CuTexImage* hist)
{
int ws = key->GetImgWidth(), hs = key->GetImgHeight();
int wd = hist->GetImgWidth(), hd = hist->GetImgHeight();
dim3 grid((wd + HIST_INIT_WIDTH - 1)/ HIST_INIT_WIDTH, hd);
dim3 block(HIST_INIT_WIDTH, 1);
key->BindTexture(texDataF4);
InitHist_Kernel<<<grid, block>>>((int4*) hist->_cuData, ws, wd, hd);
//CheckHistInit(key, hist);
}
void __global__ ReduceHist_Kernel(int4* d_hist, int ws, int wd, int height)
{
int row = IMUL(blockIdx.y, blockDim.y) + threadIdx.y;
int col = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(row < height && col < wd)
{
int hidx = IMUL(row, wd) + col;
int scol = col << 2;
int sidx = IMUL(row, ws) + scol;
int v[4] = {0, 0, 0, 0};
#pragma unroll
for(int i = 0; i < 4 && scol < ws; ++i, ++scol)
{
int4 temp = tex1Dfetch(texDataI4, sidx + i);
v[i] = temp.x + temp.y + temp.z + temp.w;
}
d_hist[hidx] = make_int4(v[0], v[1], v[2], v[3]);
}
}
void ProgramCU::ReduceHistogram(CuTexImage*hist1, CuTexImage* hist2)
{
int ws = hist1->GetImgWidth(), hs = hist1->GetImgHeight();
int wd = hist2->GetImgWidth(), hd = hist2->GetImgHeight();
int temp = (int)floor(logf(float(wd * 2/ 3)) / logf(2.0f));
const int wi = min(7, max(temp , 0));
hist1->BindTexture(texDataI4);
const int BW = 1 << wi, BH = 1 << (7 - wi);
dim3 grid((wd + BW - 1)/ BW, (hd + BH -1) / BH);
dim3 block(BW, BH);
ReduceHist_Kernel<<<grid, block>>>((int4*)hist2->_cuData, ws, wd, hd);
}
#define LISTGEN_BLOCK_DIM 128
void __global__ ListGen_Kernel(int4* d_list, int width)
{
int idx1 = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int4 pos = tex1Dfetch(texDataList, idx1);
int idx2 = IMUL(pos.y, width) + pos.x;
int4 temp = tex1Dfetch(texDataI4, idx2);
int sum1 = temp.x + temp.y;
int sum2 = sum1 + temp.z;
pos.x <<= 2;
if(pos.z >= sum2)
{
pos.x += 3;
pos.z -= sum2;
}else if(pos.z >= sum1)
{
pos.x += 2;
pos.z -= sum1;
}else if(pos.z >= temp.x)
{
pos.x += 1;
pos.z -= temp.x;
}
d_list[idx1] = pos;
}
//input list (x, y) (x, y) ....
void ProgramCU::GenerateList(CuTexImage* list, CuTexImage* hist)
{
int len = list->GetImgWidth();
list->BindTexture(texDataList);
hist->BindTexture(texDataI4);
dim3 grid((len + LISTGEN_BLOCK_DIM -1) /LISTGEN_BLOCK_DIM);
dim3 block(LISTGEN_BLOCK_DIM);
ListGen_Kernel<<<grid, block>>>((int4*) list->_cuData, hist->GetImgWidth());
}
void __global__ ComputeOrientation_Kernel(float4* d_list,
int list_len,
int width, int height,
float sigma, float sigma_step,
float gaussian_factor, float sample_factor,
int num_orientation,
int existing_keypoint,
int subpixel,
int keepsign)
{
const float ten_degree_per_radius = 5.7295779513082320876798154814105;
const float radius_per_ten_degrees = 1.0 / 5.7295779513082320876798154814105;
int idx = IMUL(blockDim.x, blockIdx.x) + threadIdx.x;
if(idx >= list_len) return;
float4 key;
if(existing_keypoint)
{
key = tex1Dfetch(texDataF4, idx);
}else
{
int4 ikey = tex1Dfetch(texDataList, idx);
key.x = ikey.x + 0.5f;
key.y = ikey.y + 0.5f;
key.z = sigma;
if(subpixel || keepsign)
{
float4 offset = tex1Dfetch(texDataF4, IMUL(width, ikey.y) + ikey.x);
if(subpixel)
{
key.x += offset.y;
key.y += offset.z;
key.z *= pow(sigma_step, offset.w);
}
if(keepsign) key.z *= offset.x;
}
}
if(num_orientation == 0)
{
key.w = 0;
d_list[idx] = key;
return;
}
float vote[37];
float gsigma = key.z * gaussian_factor;
float win = fabs(key.z) * sample_factor;
float dist_threshold = win * win + 0.5;
float factor = -0.5f / (gsigma * gsigma);
float xmin = max(1.5f, floor(key.x - win) + 0.5f);
float ymin = max(1.5f, floor(key.y - win) + 0.5f);
float xmax = min(width - 1.5f, floor(key.x + win) + 0.5f);
float ymax = min(height -1.5f, floor(key.y + win) + 0.5f);
#pragma unroll
for(int i = 0; i < 36; ++i) vote[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float dx = x - key.x;
float dy = y - key.y;
float sq_dist = dx * dx + dy * dy;
if(sq_dist >= dist_threshold) continue;
float2 got = tex2D(texDataF2, x, y);
float weight = got.x * exp(sq_dist * factor);
float fidx = floor(got.y * ten_degree_per_radius);
int oidx = fidx;
if(oidx < 0) oidx += 36;
vote[oidx] += weight;
}
}
//filter the vote
const float one_third = 1.0 /3.0;
#pragma unroll
for(int i = 0; i < 6; ++i)
{
vote[36] = vote[0];
float pre = vote[35];
#pragma unroll
for(int j = 0; j < 36; ++j)
{
float temp = one_third * (pre + vote[j] + vote[j + 1]);
pre = vote[j]; vote[j] = temp;
}
}
vote[36] = vote[0];
if(num_orientation == 1 || existing_keypoint)
{
int index_max = 0;
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i)
{
index_max = vote[i] > max_vote? i : index_max;
max_vote = max(max_vote, vote[i]);
}
float pre = vote[index_max == 0? 35 : index_max -1];
float next = vote[index_max + 1];
float weight = max_vote;
float off = 0.5f * FDIV(next - pre, weight + weight - next - pre);
key.w = radius_per_ten_degrees * (index_max + 0.5f + off);
d_list[idx] = key;
}else
{
float max_vote = vote[0];
#pragma unroll
for(int i = 1; i < 36; ++i) max_vote = max(max_vote, vote[i]);
float vote_threshold = max_vote * 0.8f;
float pre = vote[35];
float max_rot[2], max_vot[2] = {0, 0};
int ocount = 0;
#pragma unroll
for(int i =0; i < 36; ++i)
{
float next = vote[i + 1];
if(vote[i] > vote_threshold && vote[i] > pre && vote[i] > next)
{
float di = 0.5f * FDIV(next - pre, vote[i] + vote[i] - next - pre);
float rot = i + di + 0.5f;
float weight = vote[i];
///
if(weight > max_vot[1])
{
if(weight > max_vot[0])
{
max_vot[1] = max_vot[0];
max_rot[1] = max_rot[0];
max_vot[0] = weight;
max_rot[0] = rot;
}
else
{
max_vot[1] = weight;
max_rot[1] = rot;
}
ocount ++;
}
}
pre = vote[i];
}
float fr1 = max_rot[0] / 36.0f;
if(fr1 < 0) fr1 += 1.0f;
unsigned short us1 = ocount == 0? 65535 : ((unsigned short )floor(fr1 * 65535.0f));
unsigned short us2 = 65535;
if(ocount > 1)
{
float fr2 = max_rot[1] / 36.0f;
if(fr2 < 0) fr2 += 1.0f;
us2 = (unsigned short ) floor(fr2 * 65535.0f);
}
unsigned int uspack = (us2 << 16) | us1;
key.w = __int_as_float(uspack);
d_list[idx] = key;
}
}
void ProgramCU::ComputeOrientation(CuTexImage* list, CuTexImage* got, CuTexImage*key,
float sigma, float sigma_step, int existing_keypoint)
{
int len = list->GetImgWidth();
if(len <= 0) return;
int width = got->GetImgWidth(), height = got->GetImgHeight();
if(existing_keypoint)
{
list->BindTexture(texDataF4);
}else
{
list->BindTexture(texDataList);
if(GlobalUtil::_SubpixelLocalization) key->BindTexture(texDataF4);
}
got->BindTexture2D(texDataF2);
const int block_width = 64;
dim3 grid((len + block_width -1) / block_width);
dim3 block(block_width);
ComputeOrientation_Kernel<<<grid, block>>>((float4*) list->_cuData,
len, width, height, sigma, sigma_step,
GlobalUtil::_OrientationGaussianFactor,
GlobalUtil::_OrientationGaussianFactor * GlobalUtil::_OrientationWindowFactor,
GlobalUtil::_FixedOrientation? 0 : GlobalUtil::_MaxOrientation,
existing_keypoint, GlobalUtil::_SubpixelLocalization, GlobalUtil::_KeepExtremumSign);
ProgramCU::CheckErrorCUDA("ComputeOrientation");
}
template <bool DYNAMIC_INDEXING> void __global__ ComputeDescriptor_Kernel(float4* d_des, int num,
int width, int height, float window_factor)
{
const float rpi = 4.0/ 3.14159265358979323846;
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
int fidx = idx >> 4;
if(fidx >= num) return;
float4 key = tex1Dfetch(texDataF4, fidx);
int bidx = idx& 0xf, ix = bidx & 0x3, iy = bidx >> 2;
float spt = fabs(key.z * window_factor);
float s, c; __sincosf(key.w, &s, &c);
float anglef = key.w > 3.14159265358979323846? key.w - (2.0 * 3.14159265358979323846) : key.w ;
float cspt = c * spt, sspt = s * spt;
float crspt = c / spt, srspt = s / spt;
float2 offsetpt, pt;
float xmin, ymin, xmax, ymax, bsz;
offsetpt.x = ix - 1.5f;
offsetpt.y = iy - 1.5f;
pt.x = cspt * offsetpt.x - sspt * offsetpt.y + key.x;
pt.y = cspt * offsetpt.y + sspt * offsetpt.x + key.y;
bsz = fabs(cspt) + fabs(sspt);
xmin = max(1.5f, floor(pt.x - bsz) + 0.5f);
ymin = max(1.5f, floor(pt.y - bsz) + 0.5f);
xmax = min(width - 1.5f, floor(pt.x + bsz) + 0.5f);
ymax = min(height - 1.5f, floor(pt.y + bsz) + 0.5f);
float des[9];
#pragma unroll
for(int i =0; i < 9; ++i) des[i] = 0.0f;
for(float y = ymin; y <= ymax; y += 1.0f)
{
for(float x = xmin; x <= xmax; x += 1.0f)
{
float dx = x - pt.x;
float dy = y - pt.y;
float nx = crspt * dx + srspt * dy;
float ny = crspt * dy - srspt * dx;
float nxn = fabs(nx);
float nyn = fabs(ny);
if(nxn < 1.0f && nyn < 1.0f)
{
float2 cc = tex2D(texDataF2, x, y);
float dnx = nx + offsetpt.x;
float dny = ny + offsetpt.y;
float ww = exp(-0.125f * (dnx * dnx + dny * dny));
float wx = 1.0 - nxn;
float wy = 1.0 - nyn;
float weight = ww * wx * wy * cc.x;
float theta = (anglef - cc.y) * rpi;
if(theta < 0) theta += 8.0f;
float fo = floor(theta);
int fidx = fo;
float weight1 = fo + 1.0f - theta;
float weight2 = theta - fo;
if(DYNAMIC_INDEXING)
{
des[fidx] += (weight1 * weight);
des[fidx + 1] += (weight2 * weight);
//this dynamic indexing part might be slow
}else
{
#pragma unroll
for(int k = 0; k < 8; ++k)
{
if(k == fidx)
{
des[k] += (weight1 * weight);
des[k+1] += (weight2 * weight);
}
}
}
}
}
}
des[0] += des[8];
int didx = idx << 1;
d_des[didx] = make_float4(des[0], des[1], des[2], des[3]);
d_des[didx+1] = make_float4(des[4], des[5], des[6], des[7]);
}
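// NormalizeDescriptor_Kernel applies the standard SIFT post-processing: each 128-float
// descriptor (32 float4 reads) is normalized to unit length, every component is clamped
// at 0.2, and the result is renormalized before being written back.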
void __global__ NormalizeDescriptor_Kernel(float4* d_des, int num)
{
float4 temp[32];
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
int sidx = idx << 5;
float norm1 = 0, norm2 = 0;
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i] = tex1Dfetch(texDataF4, sidx +i);
norm1 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y +
temp[i].z * temp[i].z + temp[i].w * temp[i].w);
}
norm1 = rsqrt(norm1);
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i].x = min(0.2f, temp[i].x * norm1);
temp[i].y = min(0.2f, temp[i].y * norm1);
temp[i].z = min(0.2f, temp[i].z * norm1);
temp[i].w = min(0.2f, temp[i].w * norm1);
norm2 += (temp[i].x * temp[i].x + temp[i].y * temp[i].y +
temp[i].z * temp[i].z + temp[i].w * temp[i].w);
}
norm2 = rsqrt(norm2);
#pragma unroll
for(int i = 0; i < 32; ++i)
{
temp[i].x *= norm2; temp[i].y *= norm2;
temp[i].z *= norm2; temp[i].w *= norm2;
d_des[sidx + i] = temp[i];
}
}
void ProgramCU::ComputeDescriptor(CuTexImage*list, CuTexImage* got, CuTexImage* dtex)
{
int num = list->GetImgWidth();
int width = got->GetImgWidth();
int height = got->GetImgHeight();
dtex->InitTexture(num*128, 1, 1);
got->BindTexture2D(texDataF2);
list->BindTexture(texDataF4);
int block_width = 64;
dim3 grid((num * 16 + block_width -1) / block_width);
dim3 block(block_width);
if(GlobalUtil::_UseDynamicIndexing)
ComputeDescriptor_Kernel<true><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
else
ComputeDescriptor_Kernel<false><<<grid, block>>>((float4*) dtex->_cuData, num, width, height, GlobalUtil::_DescriptorWindowFactor);
if(GlobalUtil::_NormalizedSIFT)
{
dtex->BindTexture(texDataF4);
const int block_width = 32;
dim3 grid((num + block_width -1) / block_width);
dim3 block(block_width);
NormalizeDescriptor_Kernel<<<grid, block>>>((float4*) dtex->_cuData, num);
}
CheckErrorCUDA("ComputeDescriptor");
}
//////////////////////////////////////////////////////
void ProgramCU::FinishCUDA()
{
cudaThreadSynchronize();
}
void ProgramCU::CheckErrorCUDA(const char* location)
{
cudaError_t e = cudaGetLastError();
if(e)
{
if(location) std::cerr << location << ":\t";
std::cerr << cudaGetErrorString(e) << endl;
}
}
void __global__ ConvertDOG_Kernel(float* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float v = tex1Dfetch(texData, index);
d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)?
0.5 : saturate(0.5+20.0*v);
}
}
///
void ProgramCU::DisplayConvertDOG(CuTexImage* dog, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = dog->GetImgWidth(), height = dog ->GetImgHeight();
dog->BindTexture(texData);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
ConvertDOG_Kernel<<<grid, block>>>((float*) out->_cuData, width, height);
ProgramCU::CheckErrorCUDA("DisplayConvertDOG");
}
void __global__ ConvertGRD_Kernel(float* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float v = tex1Dfetch(texData, index << 1);
d_result[index] = (col == 0 || row == 0 || col == width -1 || row == height -1)?
0 : saturate(5 * v);
}
}
void ProgramCU::DisplayConvertGRD(CuTexImage* got, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = got->GetImgWidth(), height = got ->GetImgHeight();
got->BindTexture(texData);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
ConvertGRD_Kernel<<<grid, block>>>((float*) out->_cuData, width, height);
ProgramCU::CheckErrorCUDA("DisplayConvertGRD");
}
void __global__ ConvertKEY_Kernel(float4* d_result, int width, int height)
{
int row = (blockIdx.y << BLOCK_LOG_DIM) + threadIdx.y;
int col = (blockIdx.x << BLOCK_LOG_DIM) + threadIdx.x;
if(col < width && row < height)
{
int index = row * width + col;
float4 keyv = tex1Dfetch(texDataF4, index);
int is_key = (keyv.x == 1.0f || keyv.x == -1.0f);
int inside = col > 0 && row > 0 && row < height -1 && col < width - 1;
float v = inside? saturate(0.5 + 20 * tex1Dfetch(texData, index)) : 0.5;
d_result[index] = is_key && inside ?
(keyv.x > 0? make_float4(1.0f, 0, 0, 1.0f) : make_float4(0.0f, 1.0f, 0.0f, 1.0f)):
make_float4(v, v, v, 1.0f) ;
}
}
void ProgramCU::DisplayConvertKEY(CuTexImage* key, CuTexImage* dog, CuTexImage* out)
{
if(out->_cuData == NULL) return;
int width = key->GetImgWidth(), height = key ->GetImgHeight();
dog->BindTexture(texData);
key->BindTexture(texDataF4);
dim3 grid((width + BLOCK_DIM - 1)/ BLOCK_DIM, (height + BLOCK_DIM - 1)/BLOCK_DIM);
dim3 block(BLOCK_DIM, BLOCK_DIM);
ConvertKEY_Kernel<<<grid, block>>>((float4*) out->_cuData, width, height);
}
void __global__ DisplayKeyPoint_Kernel(float4 * d_result, int num)
{
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
float4 v = tex1Dfetch(texDataF4, idx);
d_result[idx] = make_float4(v.x, v.y, 0, 1.0f);
}
void ProgramCU::DisplayKeyPoint(CuTexImage* ftex, CuTexImage* out)
{
int num = ftex->GetImgWidth();
int block_width = 64;
dim3 grid((num + block_width -1) /block_width);
dim3 block(block_width);
ftex->BindTexture(texDataF4);
DisplayKeyPoint_Kernel<<<grid, block>>>((float4*) out->_cuData, num);
ProgramCU::CheckErrorCUDA("DisplayKeyPoint");
}
void __global__ DisplayKeyBox_Kernel(float4* d_result, int num)
{
int idx = IMUL(blockIdx.x, blockDim.x) + threadIdx.x;
if(idx >= num) return;
int kidx = idx / 10, vidx = idx - IMUL(kidx , 10);
float4 v = tex1Dfetch(texDataF4, kidx);
float sz = fabs(v.z * 3.0f);
///////////////////////
float s, c; __sincosf(v.w, &s, &c);
///////////////////////
float dx = vidx == 0? 0 : ((vidx <= 4 || vidx >= 9)? sz : -sz);
float dy = vidx <= 1? 0 : ((vidx <= 2 || vidx >= 7)? -sz : sz);
float4 pos;
pos.x = v.x + c * dx - s * dy;
pos.y = v.y + c * dy + s * dx;
pos.z = 0; pos.w = 1.0f;
d_result[idx] = pos;
}
void ProgramCU::DisplayKeyBox(CuTexImage* ftex, CuTexImage* out)
{
int len = ftex->GetImgWidth();
int block_width = 32;
dim3 grid((len * 10 + block_width -1) / block_width);
dim3 block(block_width);
ftex->BindTexture(texDataF4);
DisplayKeyBox_Kernel<<<grid, block>>>((float4*) out->_cuData, len * 10);
}
///////////////////////////////////////////////////////////////////
inline void CuTexImage:: BindTexture(textureReference& texRef)
{
cudaBindTexture(NULL, &texRef, _cuData, &texRef.channelDesc, _numBytes);
}
inline void CuTexImage::BindTexture2D(textureReference& texRef)
{
cudaChannelFormatDesc desc;
cudaGetChannelDesc(&desc, _cuData2D);
cudaBindTextureToArray(&texRef, _cuData2D, &desc);
}
int ProgramCU::IsCudaSupported()
{
int deviceCount;
cudaGetDeviceCount(&deviceCount);
return deviceCount;
}
////////////////////////////////////////////////////////////////////////////////////////
// siftmatch functions
//////////////////////////////////////////////////////////////////////////////////////////
#define MULT_TBLOCK_DIMX 128
#define MULT_TBLOCK_DIMY 1
#define MULT_BLOCK_DIMX (MULT_TBLOCK_DIMX)
#define MULT_BLOCK_DIMY (8 * MULT_TBLOCK_DIMY)
texture<uint4, 1, cudaReadModeElementType> texDes1;
texture<uint4, 1, cudaReadModeElementType> texDes2;
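// Each descriptor occupies 128 bytes, read as 8 uint4 values per feature; this is why the
// host code divides GetImgWidth() by 8 to recover the feature counts num1/num2. The bytes
// appear to be 512-scaled descriptor components, so the integer dot products computed below
// top out near 512*512 = 262144 (the same magnitude used as the rejection sentinel).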
void __global__ MultiplyDescriptor_Kernel(int* d_result, int num1, int num2, int3* d_temp)
{
int idx01 = (blockIdx.y * MULT_BLOCK_DIMY), idx02 = (blockIdx.x * MULT_BLOCK_DIMX);
int idx1 = idx01 + threadIdx.y, idx2 = idx02 + threadIdx.x;
__shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];
int read_idx1 = idx01 * 8 + threadIdx.x, read_idx2 = idx2 * 8;
int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2;
int cache_idx1 = IMUL(row4, 17) + (col4 << 2);
///////////////////////////////////////////////////////////////
//Load feature descriptors
///////////////////////////////////////////////////////////////
#if MULT_BLOCK_DIMY == 16
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w;
#elif MULT_BLOCK_DIMY == 8
if(threadIdx.x < 64)
{
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x; data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z; data1[cache_idx1+3] = v.w;
}
#else
#error
#endif
__syncthreads();
///
if(idx2 >= num2) return;
///////////////////////////////////////////////////////////////////////////
//compare descriptors
int results[MULT_BLOCK_DIMY];
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i) results[i] = 0;
#pragma unroll
for(int i = 0; i < 8; ++i)
{
uint4 v = tex1Dfetch(texDes2, read_idx2 + i);
unsigned char* p2 = (unsigned char*)(&v);
#pragma unroll
for(int k = 0; k < MULT_BLOCK_DIMY; ++k)
{
unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4));
results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1])
+ IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3])
+ IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5])
+ IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7])
+ IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9])
+ IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11])
+ IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13])
+ IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15]));
}
}
int dst_idx = IMUL(idx1, num2) + idx2;
if(d_temp)
{
int3 cmp_result = make_int3(0, -1, 0);
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
cmp_result = results[i] > cmp_result.x?
make_int3(results[i], idx1 + i, cmp_result.x) :
make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i]));
d_result[dst_idx + IMUL(i, num2)] = results[i];
}
}
d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result;
}else
{
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = results[i];
}
}
}
void ProgramCU::MultiplyDescriptor(CuTexImage* des1, CuTexImage* des2, CuTexImage* texDot, CuTexImage* texCRT)
{
int num1 = des1->GetImgWidth() / 8;
int num2 = des2->GetImgWidth() / 8;
dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX,
(num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);
dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);
texDot->InitTexture( num2,num1);
if(texCRT) texCRT->InitTexture(num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 32);
des1->BindTexture(texDes1);
des2->BindTexture(texDes2);
MultiplyDescriptor_Kernel<<<grid, block>>>((int*)texDot->_cuData, num1, num2,
(texCRT? (int3*)texCRT->_cuData : NULL));
ProgramCU::CheckErrorCUDA("MultiplyDescriptor");
}
texture<float, 1, cudaReadModeElementType> texLoc1;
texture<float2, 1, cudaReadModeElementType> texLoc2;
struct Matrix33{float mat[3][3];};
void __global__ MultiplyDescriptorG_Kernel(int* d_result, int num1, int num2, int3* d_temp,
Matrix33 H, float hdistmax, Matrix33 F, float fdistmax)
{
int idx01 = (blockIdx.y * MULT_BLOCK_DIMY);
int idx02 = (blockIdx.x * MULT_BLOCK_DIMX);
int idx1 = idx01 + threadIdx.y;
int idx2 = idx02 + threadIdx.x;
__shared__ int data1[17 * 2 * MULT_BLOCK_DIMY];
__shared__ float loc1[MULT_BLOCK_DIMY * 2];
int read_idx1 = idx01 * 8 + threadIdx.x ;
int read_idx2 = idx2 * 8;
int col4 = threadIdx.x & 0x3, row4 = threadIdx.x >> 2;
int cache_idx1 = IMUL(row4, 17) + (col4 << 2);
#if MULT_BLOCK_DIMY == 16
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x;
data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z;
data1[cache_idx1+3] = v.w;
#elif MULT_BLOCK_DIMY == 8
if(threadIdx.x < 64)
{
uint4 v = tex1Dfetch(texDes1, read_idx1);
data1[cache_idx1] = v.x;
data1[cache_idx1+1] = v.y;
data1[cache_idx1+2] = v.z;
data1[cache_idx1+3] = v.w;
}
#else
#error
#endif
__syncthreads();
if(threadIdx.x < MULT_BLOCK_DIMY * 2)
{
loc1[threadIdx.x] = tex1Dfetch(texLoc1, 2 * idx01 + threadIdx.x);
}
__syncthreads();
if(idx2 >= num2) return;
int results[MULT_BLOCK_DIMY];
/////////////////////////////////////////////////////////////////////////////////////////////
//geometric verification
/////////////////////////////////////////////////////////////////////////////////////////////
int good_count = 0;
float2 loc2 = tex1Dfetch(texLoc2, idx2);
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
float* loci = loc1 + i * 2;
float locx = loci[0], locy = loci[1];
//homography
float x[3], diff[2];
x[0] = H.mat[0][0] * locx + H.mat[0][1] * locy + H.mat[0][2];
x[1] = H.mat[1][0] * locx + H.mat[1][1] * locy + H.mat[1][2];
x[2] = H.mat[2][0] * locx + H.mat[2][1] * locy + H.mat[2][2];
diff[0] = fabs(FDIV(x[0], x[2]) - loc2.x);
diff[1] = fabs(FDIV(x[1], x[2]) - loc2.y);
if(diff[0] < hdistmax && diff[1] < hdistmax)
{
//check fundamental matrix
float fx1[3], ftx2[3], x2fx1, se;
fx1[0] = F.mat[0][0] * locx + F.mat[0][1] * locy + F.mat[0][2];
fx1[1] = F.mat[1][0] * locx + F.mat[1][1] * locy + F.mat[1][2];
//fx1[2] = F.mat[2][0] * locx + F.mat[2][1] * locy + F.mat[2][2];
ftx2[0] = F.mat[0][0] * locx + F.mat[1][0] * locy + F.mat[2][0];
ftx2[1] = F.mat[0][1] * locx + F.mat[1][1] * locy + F.mat[2][1];
ftx2[2] = F.mat[0][2] * locx + F.mat[1][2] * locy + F.mat[2][2];
x2fx1 = locx * ftx2[0] + locy * ftx2[1] + ftx2[2];
se = FDIV(x2fx1 * x2fx1, fx1[0] * fx1[0] + fx1[1] * fx1[1] + ftx2[0] * ftx2[0] + ftx2[1] * ftx2[1]);
results[i] = se < fdistmax? 0: -262144;
}else
{
results[i] = -262144;
}
}else
{
results[i] = -262144;
}
good_count += (results[i] >=0);
}
/////////////////////////////////////////////////////////////////////////////////////////////
///compare feature descriptors anyway
/////////////////////////////////////////////////////////////////////////////////////////////
if(good_count > 0)
{
#pragma unroll
for(int i = 0; i < 8; ++i)
{
uint4 v = tex1Dfetch(texDes2, read_idx2 + i);
unsigned char* p2 = (unsigned char*)(&v);
#pragma unroll
for(int k = 0; k < MULT_BLOCK_DIMY; ++k)
{
unsigned char* p1 = (unsigned char*) (data1 + k * 34 + i * 4 + (i/4));
results[k] += ( IMUL(p1[0], p2[0]) + IMUL(p1[1], p2[1])
+ IMUL(p1[2], p2[2]) + IMUL(p1[3], p2[3])
+ IMUL(p1[4], p2[4]) + IMUL(p1[5], p2[5])
+ IMUL(p1[6], p2[6]) + IMUL(p1[7], p2[7])
+ IMUL(p1[8], p2[8]) + IMUL(p1[9], p2[9])
+ IMUL(p1[10], p2[10]) + IMUL(p1[11], p2[11])
+ IMUL(p1[12], p2[12]) + IMUL(p1[13], p2[13])
+ IMUL(p1[14], p2[14]) + IMUL(p1[15], p2[15]));
}
}
}
int dst_idx = IMUL(idx1, num2) + idx2;
if(d_temp)
{
int3 cmp_result = make_int3(0, -1, 0);
#pragma unroll
for(int i= 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1)
{
cmp_result = results[i] > cmp_result.x?
make_int3(results[i], idx1 + i, cmp_result.x) :
make_int3(cmp_result.x, cmp_result.y, max(cmp_result.z, results[i]));
d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0);
}else
{
break;
}
}
d_temp[ IMUL(blockIdx.y, num2) + idx2] = cmp_result;
}else
{
#pragma unroll
for(int i = 0; i < MULT_BLOCK_DIMY; ++i)
{
if(idx1 + i < num1) d_result[dst_idx + IMUL(i, num2)] = max(results[i], 0);
else break;
}
}
}
void ProgramCU::MultiplyDescriptorG(CuTexImage* des1, CuTexImage* des2,
CuTexImage* loc1, CuTexImage* loc2, CuTexImage* texDot, CuTexImage* texCRT,
float H[3][3], float hdistmax, float F[3][3], float fdistmax)
{
int num1 = des1->GetImgWidth() / 8;
int num2 = des2->GetImgWidth() / 8;
Matrix33 MatF, MatH;
//copy the matrix
memcpy(MatF.mat, F, 9 * sizeof(float));
memcpy(MatH.mat, H, 9 * sizeof(float));
//thread blocks
dim3 grid( (num2 + MULT_BLOCK_DIMX - 1)/ MULT_BLOCK_DIMX,
(num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY);
dim3 block(MULT_TBLOCK_DIMX, MULT_TBLOCK_DIMY);
//intermediate results
texDot->InitTexture( num2,num1);
if(texCRT) texCRT->InitTexture( num2, (num1 + MULT_BLOCK_DIMY - 1)/MULT_BLOCK_DIMY, 3);
loc1->BindTexture(texLoc1);
loc2->BindTexture(texLoc2);
des1->BindTexture(texDes1);
des2->BindTexture(texDes2);
MultiplyDescriptorG_Kernel<<<grid, block>>>((int*)texDot->_cuData, num1, num2,
(texCRT? (int3*)texCRT->_cuData : NULL),
MatH, hdistmax, MatF, fdistmax);
}
texture<int, 1, cudaReadModeElementType> texDOT;
#define ROWMATCH_BLOCK_WIDTH 32
#define ROWMATCH_BLOCK_HEIGHT 1
void __global__ RowMatch_Kernel(int*d_dot, int* d_result, int num2, float distmax, float ratiomax)
{
#if ROWMATCH_BLOCK_HEIGHT == 1
__shared__ int dotmax[ROWMATCH_BLOCK_WIDTH];
__shared__ int dotnxt[ROWMATCH_BLOCK_WIDTH];
__shared__ int dotidx[ROWMATCH_BLOCK_WIDTH];
int row = blockIdx.y;
#else
__shared__ int x_dotmax[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
__shared__ int x_dotnxt[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
__shared__ int x_dotidx[ROWMATCH_BLOCK_HEIGHT][ROWMATCH_BLOCK_WIDTH];
int* dotmax = x_dotmax[threadIdx.y];
int* dotnxt = x_dotnxt[threadIdx.y];
int* dotidx = x_dotidx[threadIdx.y];
int row = IMUL(blockIdx.y, ROWMATCH_BLOCK_HEIGHT) + threadIdx.y;
#endif
int base_address = IMUL(row , num2);
int t_dotmax = 0, t_dotnxt = 0, t_dotidx = -1;
for(int i = 0; i < num2; i += ROWMATCH_BLOCK_WIDTH)
{
if(threadIdx.x + i < num2)
{
int v = tex1Dfetch(texDOT, base_address + threadIdx.x + i);//d_dot[base_address + threadIdx.x + i];//
bool test = v > t_dotmax;
t_dotnxt = test? t_dotmax : max(t_dotnxt, v);
t_dotidx = test? (threadIdx.x + i) : t_dotidx;
t_dotmax = test? v: t_dotmax;
}
__syncthreads();
}
dotmax[threadIdx.x] = t_dotmax;
dotnxt[threadIdx.x] = t_dotnxt;
dotidx[threadIdx.x] = t_dotidx;
__syncthreads();
#pragma unroll
for(int step = ROWMATCH_BLOCK_WIDTH/2; step >0; step /= 2)
{
if(threadIdx.x < step)
{
int v1 = dotmax[threadIdx.x], v2 = dotmax[threadIdx.x + step];
bool test = v2 > v1;
dotnxt[threadIdx.x] = test? max(v1, dotnxt[threadIdx.x + step]) :max(dotnxt[threadIdx.x], v2);
dotidx[threadIdx.x] = test? dotidx[threadIdx.x + step] : dotidx[threadIdx.x];
dotmax[threadIdx.x] = test? v2 : v1;
}
__syncthreads();
}
if(threadIdx.x == 0)
{
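		// 0.000003814697265625f = 1/262144 = 1/(512*512); this appears to rescale the integer
		// dot product of two 512-scaled descriptors back to a cosine in [0,1], so dist/distn
		// below are angular distances to the best and second-best candidate match.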
float dist = acos(min(dotmax[0] * 0.000003814697265625f, 1.0));
float distn = acos(min(dotnxt[0] * 0.000003814697265625f, 1.0));
//float ratio = dist / distn;
d_result[row] = (dist < distmax) && (dist < distn * ratiomax) ? dotidx[0] : -1;//? : -1;
}
}
void ProgramCU::GetRowMatch(CuTexImage* texDot, CuTexImage* texMatch, float distmax, float ratiomax)
{
int num1 = texDot->GetImgHeight();
int num2 = texDot->GetImgWidth();
dim3 grid(1, num1/ROWMATCH_BLOCK_HEIGHT);
dim3 block(ROWMATCH_BLOCK_WIDTH, ROWMATCH_BLOCK_HEIGHT);
texDot->BindTexture(texDOT);
RowMatch_Kernel<<<grid, block>>>((int*)texDot->_cuData,
(int*)texMatch->_cuData, num2, distmax, ratiomax);
}
#define COLMATCH_BLOCK_WIDTH 32
//texture<int3, 1, cudaReadModeElementType> texCT;
void __global__ ColMatch_Kernel(int3*d_crt, int* d_result, int height, int num2, float distmax, float ratiomax)
{
int col = COLMATCH_BLOCK_WIDTH * blockIdx.x + threadIdx.x;
if(col >= num2) return;
int3 result = d_crt[col];//tex1Dfetch(texCT, col);
int read_idx = col + num2;
for(int i = 1; i < height; ++i, read_idx += num2)
{
int3 temp = d_crt[read_idx];//tex1Dfetch(texCT, read_idx);
result = result.x < temp.x?
make_int3(temp.x, temp.y, max(result.x, temp.z)) :
make_int3(result.x, result.y, max(result.z, temp.x));
}
float dist = acos(min(result.x * 0.000003814697265625f, 1.0));
float distn = acos(min(result.z * 0.000003814697265625f, 1.0));
//float ratio = dist / distn;
d_result[col] = (dist < distmax) && (dist < distn * ratiomax) ? result.y : -1;//? : -1;
}
void ProgramCU::GetColMatch(CuTexImage* texCRT, CuTexImage* texMatch, float distmax, float ratiomax)
{
int height = texCRT->GetImgHeight();
int num2 = texCRT->GetImgWidth();
//texCRT->BindTexture(texCT);
dim3 grid((num2 + COLMATCH_BLOCK_WIDTH -1) / COLMATCH_BLOCK_WIDTH);
dim3 block(COLMATCH_BLOCK_WIDTH);
ColMatch_Kernel<<<grid, block>>>((int3*)texCRT->_cuData, (int*) texMatch->_cuData, height, num2, distmax, ratiomax);
}
#endif
|
435911c61b28f6985adfa7d2654ec3b97e8e6948.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaYUV.h"
#include "imageFormat.h"
//-----------------------------------------------------------------------------------
// YUV to RGB colorspace conversion
//-----------------------------------------------------------------------------------
static inline __device__ float clamp( float x )
{
return fminf(fmaxf(x, 0.0f), 255.0f);
}
static inline __device__ float3 YUV2RGB(float Y, float U, float V)
{
U -= 128.0f;
V -= 128.0f;
#if 1
return make_float3(clamp(Y + 1.4065f * V),
clamp(Y - 0.3455f * U - 0.7169f * V),
clamp(Y + 1.7790f * U));
#else
return make_float3(clamp(Y + 1.402f * V),
clamp(Y - 0.344f * U - 0.714f * V),
clamp(Y + 1.772f * U));
#endif
}
//-----------------------------------------------------------------------------------
// YUYV/UYVY are macropixel formats, and two RGB pixels are output at once.
// Define vectors with 6 and 8 elements so they can be written at one time.
// These are similar to those from cudaVector.h, except for 6/8 elements.
//-----------------------------------------------------------------------------------
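// Layout reminder: one uchar4 macropixel holds [ Y0 | U | Y1 | V ] (YUYV order shown),
// i.e. two luma samples sharing one chroma pair. Each macropixel therefore expands to
// two RGB(A) output pixels, which is why the 6- and 8-element vectors below let a single
// store cover both pixels.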
struct /*__align__(6)*/ uchar6
{
uint8_t x0, y0, z0, x1, y1, z1;
};
struct __align__(8) uchar8
{
uint8_t x0, y0, z0, w0, x1, y1, z1, w1;
};
struct /*__align__(24)*/ float6
{
float x0, y0, z0, x1, y1, z1;
};
struct __align__(32) float8
{
float x0, y0, z0, w0, x1, y1, z1, w1;
};
template<class T> struct vecTypeInfo;
template<> struct vecTypeInfo<uchar6> { typedef uint8_t Base; };
template<> struct vecTypeInfo<uchar8> { typedef uint8_t Base; };
template<> struct vecTypeInfo<float6> { typedef float Base; };
template<> struct vecTypeInfo<float8> { typedef float Base; };
template<typename T> struct vec_assert_false : std::false_type { };
#define BaseType typename vecTypeInfo<T>::Base
template<typename T> inline __host__ __device__ T make_vec(BaseType x0, BaseType y0, BaseType z0, BaseType w0, BaseType x1, BaseType y1, BaseType z1, BaseType w1) { static_assert(vec_assert_false<T>::value, "invalid vector type - supported types are uchar6, uchar8, float6, float8"); }
template<> inline __host__ __device__ uchar6 make_vec( uint8_t x0, uint8_t y0, uint8_t z0, uint8_t w0, uint8_t x1, uint8_t y1, uint8_t z1, uint8_t w1 ) { return {x0, y0, z0, x1, y1, z1}; }
template<> inline __host__ __device__ uchar8 make_vec( uint8_t x0, uint8_t y0, uint8_t z0, uint8_t w0, uint8_t x1, uint8_t y1, uint8_t z1, uint8_t w1 ) { return {x0, y0, z0, w0, x1, y1, z1, w1}; }
template<> inline __host__ __device__ float6 make_vec( float x0, float y0, float z0, float w0, float x1, float y1, float z1, float w1 ) { return {x0, y0, z0, x1, y1, z1}; }
template<> inline __host__ __device__ float8 make_vec( float x0, float y0, float z0, float w0, float x1, float y1, float z1, float w1 ) { return {x0, y0, z0, w0, x1, y1, z1, w1}; }
//-----------------------------------------------------------------------------------
// YUYV/UYVY to RGBA
//-----------------------------------------------------------------------------------
template <typename T, imageFormat format>
__global__ void YUYVToRGBA( uchar4* src, T* dst, int halfWidth, int height )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= halfWidth || y >= height )
return;
const uchar4 macroPx = src[y * halfWidth + x];
	// Y0 is the brightness of pixel 0, Y1 the brightness of pixel 1.
	// U and V are the chroma values shared by both pixels.
float y0, y1, u, v;
if( format == IMAGE_YUYV )
{
// YUYV [ Y0 | U0 | Y1 | V0 ]
y0 = macroPx.x;
y1 = macroPx.z;
u = macroPx.y;
v = macroPx.w;
}
else if( format == IMAGE_YVYU )
{
// YVYU [ Y0 | V0 | Y1 | U0 ]
y0 = macroPx.x;
y1 = macroPx.z;
u = macroPx.w;
v = macroPx.y;
}
else // if( format == IMAGE_UYVY )
{
// UYVY [ U0 | Y0 | V0 | Y1 ]
y0 = macroPx.y;
y1 = macroPx.w;
u = macroPx.x;
v = macroPx.z;
}
// this function outputs two pixels from one YUYV macropixel
const float3 px0 = YUV2RGB(y0, u, v);
const float3 px1 = YUV2RGB(y1, u, v);
dst[y * halfWidth + x] = make_vec<T>(px0.x, px0.y, px0.z, 255,
px1.x, px1.y, px1.z, 255);
}
template<typename T, imageFormat format>
static hipError_t launchYUYVToRGB( void* input, T* output, size_t width, size_t height)
{
if( !input || !output || !width || !height )
return hipErrorInvalidValue;
const int halfWidth = width / 2; // two pixels are output at once
const dim3 blockDim(8,8);
const dim3 gridDim(iDivUp(halfWidth, blockDim.x), iDivUp(height, blockDim.y));
hipLaunchKernelGGL(( YUYVToRGBA<T, format>), dim3(gridDim), dim3(blockDim), 0, 0, (uchar4*)input, output, halfWidth, height);
return CUDA(hipGetLastError());
}
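// Hypothetical usage sketch (the buffer names below are illustrative, not part of this file):
// converting a device-resident YUYV frame of width x height pixels to packed RGB might look like
//   uchar3* rgb = NULL;
//   hipMalloc(&rgb, width * height * sizeof(uchar3));
//   cudaYUYVToRGB(yuyvDevPtr, rgb, width, height);
// where yuyvDevPtr points to device memory holding width*height/2 uchar4 macropixels.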
// cudaYUYVToRGB (uchar3)
hipError_t cudaYUYVToRGB( void* input, uchar3* output, size_t width, size_t height )
{
return launchYUYVToRGB<uchar6, IMAGE_YUYV>(input, (uchar6*)output, width, height);
}
// cudaYUYVToRGB (float3)
hipError_t cudaYUYVToRGB( void* input, float3* output, size_t width, size_t height )
{
return launchYUYVToRGB<float6, IMAGE_YUYV>(input, (float6*)output, width, height);
}
// cudaYUYVToRGBA (uchar4)
hipError_t cudaYUYVToRGBA( void* input, uchar4* output, size_t width, size_t height )
{
return launchYUYVToRGB<uchar8, IMAGE_YUYV>(input, (uchar8*)output, width, height);
}
// cudaYUYVToRGBA (float4)
hipError_t cudaYUYVToRGBA( void* input, float4* output, size_t width, size_t height )
{
return launchYUYVToRGB<float8, IMAGE_YUYV>(input, (float8*)output, width, height);
}
//-----------------------------------------------------------------------------------
// cudaUYVYToRGB (uchar3)
hipError_t cudaUYVYToRGB( void* input, uchar3* output, size_t width, size_t height )
{
return launchYUYVToRGB<uchar6, IMAGE_UYVY>(input, (uchar6*)output, width, height);
}
// cudaUYVYToRGB (float3)
hipError_t cudaUYVYToRGB( void* input, float3* output, size_t width, size_t height )
{
return launchYUYVToRGB<float6, IMAGE_UYVY>(input, (float6*)output, width, height);
}
// cudaUYVYToRGBA (uchar4)
hipError_t cudaUYVYToRGBA( void* input, uchar4* output, size_t width, size_t height )
{
return launchYUYVToRGB<uchar8, IMAGE_UYVY>(input, (uchar8*)output, width, height);
}
// cudaUYVYToRGBA (float4)
hipError_t cudaUYVYToRGBA( void* input, float4* output, size_t width, size_t height )
{
return launchYUYVToRGB<float8, IMAGE_UYVY>(input, (float8*)output, width, height);
}
//-----------------------------------------------------------------------------------
// cudaYVYUToRGB (uchar3)
hipError_t cudaYVYUToRGB( void* input, uchar3* output, size_t width, size_t height )
{
return launchYUYVToRGB<uchar6, IMAGE_YVYU>(input, (uchar6*)output, width, height);
}
// cudaYVYUToRGB (float3)
hipError_t cudaYVYUToRGB( void* input, float3* output, size_t width, size_t height )
{
return launchYUYVToRGB<float6, IMAGE_YVYU>(input, (float6*)output, width, height);
}
// cudaYVYUToRGBA (uchar4)
hipError_t cudaYVYUToRGBA( void* input, uchar4* output, size_t width, size_t height )
{
return launchYUYVToRGB<uchar8, IMAGE_YVYU>(input, (uchar8*)output, width, height);
}
// cudaYVYUToRGBA (float4)
hipError_t cudaYVYUToRGBA( void* input, float4* output, size_t width, size_t height )
{
return launchYUYVToRGB<float8, IMAGE_YVYU>(input, (float8*)output, width, height);
}
| 435911c61b28f6985adfa7d2654ec3b97e8e6948.cu | /*
* Copyright (c) 2017, NVIDIA CORPORATION. All rights reserved.
*
* Permission is hereby granted, free of charge, to any person obtaining a
* copy of this software and associated documentation files (the "Software"),
* to deal in the Software without restriction, including without limitation
* the rights to use, copy, modify, merge, publish, distribute, sublicense,
* and/or sell copies of the Software, and to permit persons to whom the
* Software is furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
* THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
* DEALINGS IN THE SOFTWARE.
*/
#include "cudaYUV.h"
#include "imageFormat.h"
//-----------------------------------------------------------------------------------
// YUV to RGB colorspace conversion
//-----------------------------------------------------------------------------------
static inline __device__ float clamp( float x )
{
return fminf(fmaxf(x, 0.0f), 255.0f);
}
static inline __device__ float3 YUV2RGB(float Y, float U, float V)
{
U -= 128.0f;
V -= 128.0f;
#if 1
return make_float3(clamp(Y + 1.4065f * V),
clamp(Y - 0.3455f * U - 0.7169f * V),
clamp(Y + 1.7790f * U));
#else
return make_float3(clamp(Y + 1.402f * V),
clamp(Y - 0.344f * U - 0.714f * V),
clamp(Y + 1.772f * U));
#endif
}
//-----------------------------------------------------------------------------------
// YUYV/UYVY are macropixel formats, and two RGB pixels are output at once.
// Define vectors with 6 and 8 elements so they can be written at one time.
// These are similar to those from cudaVector.h, except for 6/8 elements.
//-----------------------------------------------------------------------------------
struct /*__align__(6)*/ uchar6
{
uint8_t x0, y0, z0, x1, y1, z1;
};
struct __align__(8) uchar8
{
uint8_t x0, y0, z0, w0, x1, y1, z1, w1;
};
struct /*__align__(24)*/ float6
{
float x0, y0, z0, x1, y1, z1;
};
struct __align__(32) float8
{
float x0, y0, z0, w0, x1, y1, z1, w1;
};
template<class T> struct vecTypeInfo;
template<> struct vecTypeInfo<uchar6> { typedef uint8_t Base; };
template<> struct vecTypeInfo<uchar8> { typedef uint8_t Base; };
template<> struct vecTypeInfo<float6> { typedef float Base; };
template<> struct vecTypeInfo<float8> { typedef float Base; };
template<typename T> struct vec_assert_false : std::false_type { };
#define BaseType typename vecTypeInfo<T>::Base
template<typename T> inline __host__ __device__ T make_vec(BaseType x0, BaseType y0, BaseType z0, BaseType w0, BaseType x1, BaseType y1, BaseType z1, BaseType w1) { static_assert(vec_assert_false<T>::value, "invalid vector type - supported types are uchar6, uchar8, float6, float8"); }
template<> inline __host__ __device__ uchar6 make_vec( uint8_t x0, uint8_t y0, uint8_t z0, uint8_t w0, uint8_t x1, uint8_t y1, uint8_t z1, uint8_t w1 ) { return {x0, y0, z0, x1, y1, z1}; }
template<> inline __host__ __device__ uchar8 make_vec( uint8_t x0, uint8_t y0, uint8_t z0, uint8_t w0, uint8_t x1, uint8_t y1, uint8_t z1, uint8_t w1 ) { return {x0, y0, z0, w0, x1, y1, z1, w1}; }
template<> inline __host__ __device__ float6 make_vec( float x0, float y0, float z0, float w0, float x1, float y1, float z1, float w1 ) { return {x0, y0, z0, x1, y1, z1}; }
template<> inline __host__ __device__ float8 make_vec( float x0, float y0, float z0, float w0, float x1, float y1, float z1, float w1 ) { return {x0, y0, z0, w0, x1, y1, z1, w1}; }
//-----------------------------------------------------------------------------------
// YUYV/UYVY to RGBA
//-----------------------------------------------------------------------------------
template <typename T, imageFormat format>
__global__ void YUYVToRGBA( uchar4* src, T* dst, int halfWidth, int height )
{
const int x = blockIdx.x * blockDim.x + threadIdx.x;
const int y = blockIdx.y * blockDim.y + threadIdx.y;
if( x >= halfWidth || y >= height )
return;
const uchar4 macroPx = src[y * halfWidth + x];
	// Y0 is the brightness of pixel 0, Y1 the brightness of pixel 1.
	// U and V are the chroma values shared by both pixels.
float y0, y1, u, v;
if( format == IMAGE_YUYV )
{
// YUYV [ Y0 | U0 | Y1 | V0 ]
y0 = macroPx.x;
y1 = macroPx.z;
u = macroPx.y;
v = macroPx.w;
}
else if( format == IMAGE_YVYU )
{
// YVYU [ Y0 | V0 | Y1 | U0 ]
y0 = macroPx.x;
y1 = macroPx.z;
u = macroPx.w;
v = macroPx.y;
}
else // if( format == IMAGE_UYVY )
{
// UYVY [ U0 | Y0 | V0 | Y1 ]
y0 = macroPx.y;
y1 = macroPx.w;
u = macroPx.x;
v = macroPx.z;
}
// this function outputs two pixels from one YUYV macropixel
const float3 px0 = YUV2RGB(y0, u, v);
const float3 px1 = YUV2RGB(y1, u, v);
dst[y * halfWidth + x] = make_vec<T>(px0.x, px0.y, px0.z, 255,
px1.x, px1.y, px1.z, 255);
}
template<typename T, imageFormat format>
static cudaError_t launchYUYVToRGB( void* input, T* output, size_t width, size_t height)
{
if( !input || !output || !width || !height )
return cudaErrorInvalidValue;
const int halfWidth = width / 2; // two pixels are output at once
const dim3 blockDim(8,8);
const dim3 gridDim(iDivUp(halfWidth, blockDim.x), iDivUp(height, blockDim.y));
YUYVToRGBA<T, format><<<gridDim, blockDim>>>((uchar4*)input, output, halfWidth, height);
return CUDA(cudaGetLastError());
}
// cudaYUYVToRGB (uchar3)
cudaError_t cudaYUYVToRGB( void* input, uchar3* output, size_t width, size_t height )
{
return launchYUYVToRGB<uchar6, IMAGE_YUYV>(input, (uchar6*)output, width, height);
}
// cudaYUYVToRGB (float3)
cudaError_t cudaYUYVToRGB( void* input, float3* output, size_t width, size_t height )
{
return launchYUYVToRGB<float6, IMAGE_YUYV>(input, (float6*)output, width, height);
}
// cudaYUYVToRGBA (uchar4)
cudaError_t cudaYUYVToRGBA( void* input, uchar4* output, size_t width, size_t height )
{
return launchYUYVToRGB<uchar8, IMAGE_YUYV>(input, (uchar8*)output, width, height);
}
// cudaYUYVToRGBA (float4)
cudaError_t cudaYUYVToRGBA( void* input, float4* output, size_t width, size_t height )
{
return launchYUYVToRGB<float8, IMAGE_YUYV>(input, (float8*)output, width, height);
}
//-----------------------------------------------------------------------------------
// cudaUYVYToRGB (uchar3)
cudaError_t cudaUYVYToRGB( void* input, uchar3* output, size_t width, size_t height )
{
return launchYUYVToRGB<uchar6, IMAGE_UYVY>(input, (uchar6*)output, width, height);
}
// cudaUYVYToRGB (float3)
cudaError_t cudaUYVYToRGB( void* input, float3* output, size_t width, size_t height )
{
return launchYUYVToRGB<float6, IMAGE_UYVY>(input, (float6*)output, width, height);
}
// cudaUYVYToRGBA (uchar4)
cudaError_t cudaUYVYToRGBA( void* input, uchar4* output, size_t width, size_t height )
{
return launchYUYVToRGB<uchar8, IMAGE_UYVY>(input, (uchar8*)output, width, height);
}
// cudaUYVYToRGBA (float4)
cudaError_t cudaUYVYToRGBA( void* input, float4* output, size_t width, size_t height )
{
return launchYUYVToRGB<float8, IMAGE_UYVY>(input, (float8*)output, width, height);
}
//-----------------------------------------------------------------------------------
// cudaYVYUToRGB (uchar3)
cudaError_t cudaYVYUToRGB( void* input, uchar3* output, size_t width, size_t height )
{
return launchYUYVToRGB<uchar6, IMAGE_YVYU>(input, (uchar6*)output, width, height);
}
// cudaYVYUToRGB (float3)
cudaError_t cudaYVYUToRGB( void* input, float3* output, size_t width, size_t height )
{
return launchYUYVToRGB<float6, IMAGE_YVYU>(input, (float6*)output, width, height);
}
// cudaYVYUToRGBA (uchar4)
cudaError_t cudaYVYUToRGBA( void* input, uchar4* output, size_t width, size_t height )
{
return launchYUYVToRGB<uchar8, IMAGE_YVYU>(input, (uchar8*)output, width, height);
}
// cudaYVYUToRGBA (float4)
cudaError_t cudaYVYUToRGBA( void* input, float4* output, size_t width, size_t height )
{
return launchYUYVToRGB<float8, IMAGE_YVYU>(input, (float8*)output, width, height);
}
|
0b9eb4b9cbd993a28f3ec03d432819294dcfba8c.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdlib>
#include <iostream>
#include "hip/hip_runtime.h"
#include <ctime>
using namespace std;
#define NUM_ELEMENTS 512 * 1000
__global__ void vecAddDevice(float * A, float * B, float * C) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
C[i] = A[i] + B[i];
}
int main() {
float * hA, * hB, * hC;
float * dA, * dB, * dC;
int size = NUM_ELEMENTS * sizeof(float);
int device;
char ch;
hipDeviceProp_t deviceProperties;
hA = new float[NUM_ELEMENTS];
hB = new float[NUM_ELEMENTS];
hC = new float[NUM_ELEMENTS];
// get device properties
hipGetDevice(&device);
hipGetDeviceProperties(&deviceProperties, device);
cout << "Multiprocessors count: " << deviceProperties.multiProcessorCount << endl;
cout << "Warp size: " << deviceProperties.warpSize << endl;
cout << "Max Threads per Block: " << deviceProperties.maxThreadsPerBlock << endl;
int numBlocks = NUM_ELEMENTS / deviceProperties.maxThreadsPerBlock;
int threadsPerBlock = deviceProperties.maxThreadsPerBlock;
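// NOTE: NUM_ELEMENTS (512 * 1000 = 512000) divides evenly by maxThreadsPerBlock
// (1024 on current GPUs, giving 500 blocks), so the integer division above loses
// nothing and the kernel can safely omit a bounds check. For other sizes the grid
// would need to be rounded up and the kernel guarded with `if (i < NUM_ELEMENTS)`.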
// init vectors
for(int i = 0; i < NUM_ELEMENTS; i++) {
hA[i] = rand() / (float) RAND_MAX;
hB[i] = rand() / (float) RAND_MAX;
hC[i] = 0.0f;
}
cout << "Allocate device memory..." << endl;
// allocate device memory
hipMalloc(&dA, size);
hipMalloc(&dB, size);
hipMalloc(&dC, size);
// copy data to device memory
hipMemcpy(dA, hA, size, hipMemcpyHostToDevice);
hipMemcpy(dB, hB, size, hipMemcpyHostToDevice);
cout << "Starting kernel..." << endl <<
"Blocks: " << numBlocks << endl <<
"Threads per block: " << threadsPerBlock << endl;
clock_t t1 = clock();
hipLaunchKernelGGL(( vecAddDevice), dim3(numBlocks), dim3(threadsPerBlock), 0, 0, dA, dB, dC);
hipError_t e = hipDeviceSynchronize();
if(e == hipSuccess)
cout << "Done." << endl;
else
cout << "Error: " << hipGetErrorString(e) << endl;
clock_t t2 = clock() - t1;
double t = ((double)t2 / CLOCKS_PER_SEC * 1000.0);
cout << "Time elapsed: " << t << " ms" << endl;
hipMemcpy(hC, dC, size, hipMemcpyDeviceToHost);
cout << "Freeing device memory..." << endl;
// free device memory
hipFree(dA);
hipFree(dB);
hipFree(dC);
cin >> ch;
return 0;
}
| 0b9eb4b9cbd993a28f3ec03d432819294dcfba8c.cu | #include <cstdlib>
#include <iostream>
#include "cuda_runtime.h"
#include <ctime>
using namespace std;
#define NUM_ELEMENTS 512 * 1000
__global__ void vecAddDevice(float * A, float * B, float * C) {
int i = blockDim.x * blockIdx.x + threadIdx.x;
C[i] = A[i] + B[i];
}
int main() {
float * hA, * hB, * hC;
float * dA, * dB, * dC;
int size = NUM_ELEMENTS * sizeof(float);
int device;
char ch;
cudaDeviceProp deviceProperties;
hA = new float[NUM_ELEMENTS];
hB = new float[NUM_ELEMENTS];
hC = new float[NUM_ELEMENTS];
// get device properties
cudaGetDevice(&device);
cudaGetDeviceProperties(&deviceProperties, device);
cout << "Multiprocessors count: " << deviceProperties.multiProcessorCount << endl;
cout << "Warp size: " << deviceProperties.warpSize << endl;
cout << "Max Threads per Block: " << deviceProperties.maxThreadsPerBlock << endl;
int numBlocks = NUM_ELEMENTS / deviceProperties.maxThreadsPerBlock;
int threadsPerBlock = deviceProperties.maxThreadsPerBlock;
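// NOTE: NUM_ELEMENTS (512 * 1000 = 512000) divides evenly by maxThreadsPerBlock
// (1024 on current GPUs, giving 500 blocks), so the integer division above loses
// nothing and the kernel can safely omit a bounds check. For other sizes the grid
// would need to be rounded up and the kernel guarded with `if (i < NUM_ELEMENTS)`.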
// init vectors
for(int i = 0; i < NUM_ELEMENTS; i++) {
hA[i] = rand() / (float) RAND_MAX;
hB[i] = rand() / (float) RAND_MAX;
hC[i] = 0.0f;
}
cout << "Allocate device memory..." << endl;
// allocate device memory
cudaMalloc(&dA, size);
cudaMalloc(&dB, size);
cudaMalloc(&dC, size);
// copy data to device memory
cudaMemcpy(dA, hA, size, cudaMemcpyHostToDevice);
cudaMemcpy(dB, hB, size, cudaMemcpyHostToDevice);
cout << "Starting kernel..." << endl <<
"Blocks: " << numBlocks << endl <<
"Threads per block: " << threadsPerBlock << endl;
clock_t t1 = clock();
vecAddDevice<<<numBlocks, threadsPerBlock>>>(dA, dB, dC);
cudaError_t e = cudaThreadSynchronize();
if(e == cudaSuccess)
cout << "Done." << endl;
else
cout << "Error: " << cudaGetErrorString(e) << endl;
clock_t t2 = clock() - t1;
double t = ((double)t2 / CLOCKS_PER_SEC * 1000.0);
cout << "Time elapsed: " << t << " ms" << endl;
cudaMemcpy(hC, dC, size, cudaMemcpyDeviceToHost);
cout << "Freeing device memory..." << endl;
// free device memory
cudaFree(dA);
cudaFree(dB);
cudaFree(dC);
cin >> ch;
return 0;
}
|
bdc17f5a28fe8b2d1096f463c58c35d1ce26d85e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <string>
#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
using namespace std;
__global__ void printArray(int *index, int outputsize) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < outputsize) {
printf(" %d", index[i]);
}
}
__global__ void printArray(double *index, int outputsize) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < outputsize) {
printf(" %f", index[i]);
}
}
__global__ void Count_number_of_term(int *A, int *Df) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int value = A[i] - 1;
atomicAdd(&Df[value], 1);
}
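// The scan below turns the per-term counts in Df[] into posting-list offsets for the
// inverted index: index[t+1] ends up holding the running total Df[0] + ... + Df[t],
// so index[t] is where term t's document list starts (index[0] stays 0). The kernel
// only works for inputs that fit in a single block (the shared array XY is fixed at
// 100 entries), and like Count_number_of_term above it has no `i < n` guard, so the
// launch configuration must not over-cover the input.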
__global__ void Kogge_Stone_scan_kernel(int *df, int *index, int InputSize, int thread_num, int *temp) {
__shared__ int XY[100];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < InputSize) {
XY[threadIdx.x] = df[i];
__syncthreads();
}
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
__syncthreads();
if (threadIdx.x >= stride) {
XY[threadIdx.x] += XY[threadIdx.x - stride];
}
}
if (i < thread_num-1) {
index[i + 1] = XY[threadIdx.x];
}
__syncthreads();
}
__global__ void Create_InvertedIndexA (int *A, int *B, int *Df, int *Index ,int *InvertedIndexA) {
int temp = Index[threadIdx.x] + Df[threadIdx.x];
if (blockIdx.x == 0) {
for (unsigned int i = Index[threadIdx.x]; i < temp; i++) {
__syncthreads();
InvertedIndexA[i] = threadIdx.x + 1;
}
}
}
__global__ void Create_InvertedIndexB(int *A, int *B, double *C, int *Df, int *Index, int *InvertedIndexB, double *InvertedIndexC) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
__syncthreads();
int temp = 0;
temp = Index[B[i]-1];
__syncthreads();
int value = B[i] - 1;
int a = 0;
a=atomicAdd(&Index[value], 1);
InvertedIndexB[a] = A[i];
InvertedIndexC[a] = C[i];
__syncthreads();
}
// KNN Start
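// Overview of the query path implemented by the kernels below:
//  - knnTD walks the posting list of every query term (index[] delimits that term's
//    slice of invertedIndex/norms) and accumulates qnorm[i] * norms[j] into
//    docs[docId], i.e. the dot product between query and document term weights.
//  - knn divides each accumulated dot product by docNorm[doc] * queryNorm, turning
//    it into a cosine similarity (forced to 0 when either norm is 0).
//  - oddEvenSort sorts the similarities in descending order while carrying 1-based
//    document ids in dl[].
//  - classify sums the similarities of the top-k neighbours per candidate label
//    (label = docId / 10) and the host picks the label with the largest sum.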
__global__ void knnTD(int *terms, double *qnorm, int *invertedIndex, double *norms, double *docs, int *index) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
double temp = 0;
for (int j = index[terms[i]]; j < index[terms[i]+1]; j++) {
docs[invertedIndex[j]]+=qnorm[i]*norms[j];
__syncthreads();
}
}
__global__ void knn(int* terms, double *qnorm, double *docs, double *docNorm, double queryNorm) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(docNorm[i] == 0 || queryNorm == 0) {
docs[i] = 0;
} else {
docs[i] = docs[i]/(docNorm[i]*queryNorm);
}
}
__global__ void printlist(int *terms, int *invertedIndex, int *index) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
printf("Term -> %d\n", index[terms[i]]);
for (int j = index[terms[i]]; j < index[terms[i]+1]; j++) {
printf("%d -> %d\n", terms[i], invertedIndex[j]);
}
printf("\n");
}
__global__ void getDocNorm(int *docs, int *terms, double *norms, double *dn, int num) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
double temp = 0.0;
for (size_t j = 0; j < num; j++) {
if(docs[j] == i) {
temp+=(norms[j]*norms[j]);
}
}
__syncthreads();
dn[i] = sqrt(temp);
}
__global__ void oddEvenSort(double *data, int *dl, int num_elem) {
int tid = (blockIdx.x*blockDim.x) + threadIdx.x;
int tid_idx;
int offset = 0; //Start off with even, then odd
int num_swaps;
dl[tid] = tid+1;
__syncthreads();
//Calculate the maximum index for a given block
//Last block it is number of elements minus one
//Other blocks to end of block minus one
int tid_idx_max = min((((blockIdx.x + 1)*(blockDim.x * 2)) - 1), (num_elem - 1));
do
{
//Reset number of swaps
num_swaps = 0;
//work out index of data
tid_idx = (tid * 2) + offset;
//If no array or block overrun
if (tid_idx < tid_idx_max) {
//Read values into registers
double d0 = data[tid_idx];
int db0 = dl[tid_idx];
double d1 = data[tid_idx + 1];
int db1 = dl[tid_idx + 1];
//Compare registers
if (d0 < d1) {
//Swap values if needed
data[tid_idx] = d1;
dl[tid_idx] = db1;
data[tid_idx + 1] = d0;
dl[tid_idx + 1] = db0;
//keep track that we did a swap
num_swaps++;
}
}
//Switch from even to odd, or odd to even
if (offset == 0) {
offset = 1;
}
else {
offset = 0;
}
} while (__syncthreads_count(num_swaps) != 0);
}
__global__ void classify(int *dl, double *dn, int k, double *dc, int *u) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t j = 0; j < k; j++) {
if(dl[j]/10 == u[i]) {
dc[i] += dn[j];
}
}
}
// KNN end
int main(int argc, char **argv)
{
int k;
printf("%d\n", argc);
if(argc < 2) {
k = 3;
} else {
k = atoi(argv[1]);
}
// Read term-doc pairs
ifstream ifs("result_norm.txt");
string text;
text.assign( (istreambuf_iterator<char>(ifs) ),
(istreambuf_iterator<char>() ) );
char arr[text.size()+1];
strcpy(arr,text.c_str());
vector<char*> v;
vector<int> d1;
vector<int> t1;
vector<double> n1;
char* chars_array = strtok(arr, "[");
while(chars_array) {
v.push_back(chars_array);
chars_array = strtok(NULL, "[");
}
bool firstTerm = true, firstNorm = true;
for(size_t n = 0; n < v.size(); ++n)
{
char* subchar_array = strtok(v[n], ",");
while (subchar_array) {
if (n == 0) {
d1.push_back(atoi(subchar_array));
} else if (n == 1) {
if (firstTerm){
d1.pop_back();
firstTerm = false;
}
t1.push_back(atoi(subchar_array));
} else if (n == 2) {
if (firstNorm){
t1.pop_back();
firstNorm = false;
}
if (n1.size() == d1.size())
break;
n1.push_back(atof(subchar_array));
}
subchar_array = strtok(NULL, ",");
}
}
int d[d1.size()];
int t[t1.size()];
double n[n1.size()];
copy(d1.begin(), d1.end(), d);
copy(t1.begin(), t1.end(), t);
copy(n1.begin(), n1.end(), n);
/*
for (size_t i = 0; i < t1.size(); i++) {
printf("%d -> [%d,%d,%f]\n",i,d[i],t[i],n[i]);
}
*/
// Begin InvertedIndex algorithm
int numDocs = d[d1.size()-1];
const int arraySize = sizeof(d)/sizeof(int);
printf("ArraySize: %d\n", arraySize);
const int number_term = 7;
int Df[number_term] = { 0 };
int Index[number_term] = { 0 };
vector<int> IA(arraySize,0);
vector<int> IB(arraySize,0);
vector<double> IC(arraySize,0);
int InvertedIndexA[arraySize];//output
int InvertedIndexB[arraySize];//output
double InvertedIndexC[arraySize];//output
copy(IA.begin(),IA.end(),InvertedIndexA);
copy(IB.begin(),IB.end(),InvertedIndexB);
copy(IC.begin(),IC.end(),InvertedIndexC);
printf("A: %d\n", sizeof(InvertedIndexA)/sizeof(int));
int thread_num = d[arraySize - 1];
int blocks = (arraySize / thread_num) + (arraySize % thread_num != 0 ? 1 : 0);
printf("blocks = %d\n", blocks);
int *a, *b, *df, *index, *invertedIndexA, *invertedIndexB;
double *c, *invertedIndexC, *dn;
double docNorms[numDocs];
hipMallocManaged(&a, sizeof(d));
hipMallocManaged(&b, sizeof(t));
hipMallocManaged(&c, sizeof(n));
hipMallocManaged(&df, sizeof(Df));
hipMallocManaged(&index, sizeof(Index));
hipMallocManaged(&invertedIndexA, sizeof(InvertedIndexA));
hipMallocManaged(&invertedIndexB, sizeof(InvertedIndexB));
hipMallocManaged(&invertedIndexC, sizeof(InvertedIndexC));
hipMallocManaged(&dn,sizeof(docNorms));
hipMemcpy(a, d, sizeof(d), hipMemcpyHostToDevice);
hipMemcpy(b, t, sizeof(t), hipMemcpyHostToDevice);
hipMemcpy(c, n, sizeof(n), hipMemcpyHostToDevice);
hipMemcpy(df, Df, sizeof(Df), hipMemcpyHostToDevice);
hipMemcpy(index, Index, sizeof(Index), hipMemcpyHostToDevice);
hipMemcpy(invertedIndexA, InvertedIndexA, sizeof(InvertedIndexA), hipMemcpyHostToDevice);
hipMemcpy(invertedIndexB, InvertedIndexB, sizeof(InvertedIndexB), hipMemcpyHostToDevice);
hipMemcpy(invertedIndexC, InvertedIndexC, sizeof(InvertedIndexC), hipMemcpyHostToDevice);
hipMemcpy(dn,docNorms,sizeof(docNorms),hipMemcpyHostToDevice);
int Temp[number_term] = { 0 };int *temp;
hipMallocManaged(&temp, sizeof(Temp));
hipMemcpy(temp, Temp, sizeof(Temp), hipMemcpyHostToDevice);
printf("Initial Array:\n");
printf("d:");
hipLaunchKernelGGL(( printArray) , dim3(1), dim3(arraySize), 0, 0, a, sizeof(d) / sizeof(int));
hipDeviceSynchronize();
printf("\n");
printf("t:");
hipLaunchKernelGGL(( printArray) , dim3(1), dim3(arraySize) , 0, 0, b, sizeof(t) / sizeof(int));
hipDeviceSynchronize();
printf("\n");
printf("Count_number_of_term: \n");
hipLaunchKernelGGL(( Count_number_of_term) , dim3(blocks), dim3(thread_num), 0, 0, b,df);
hipDeviceSynchronize();
hipLaunchKernelGGL(( printArray) , dim3(1), dim3(thread_num), 0, 0, df, sizeof(Df) / sizeof(int));
hipDeviceSynchronize();
printf("\n");
printf("Execute the prefix sum by Kogge Stone:\n");
hipLaunchKernelGGL(( Kogge_Stone_scan_kernel) , dim3(blocks), dim3(thread_num), 0, 0, df, index, arraySize, thread_num, temp);
hipDeviceSynchronize();
//printf("Input count number array to the Kogge Stone:\n");
hipLaunchKernelGGL(( printArray) , dim3(1), dim3(arraySize) , 0, 0, index, sizeof(Index) / sizeof(int));
hipDeviceSynchronize();
printf("\n");
printf("InvertedIndex Array:\n");
hipLaunchKernelGGL(( Create_InvertedIndexA) , dim3(1), dim3(thread_num) , 0, 0, a, b, df, index, invertedIndexA);
hipDeviceSynchronize();
printf("Terms: \n");
for (size_t j = 0; j < arraySize; j++) {
printf(" %d", invertedIndexA[j]);
}
printf("\n\n");
printf("Documents: \n");
hipLaunchKernelGGL(( Create_InvertedIndexB) , dim3(blocks), dim3(thread_num) , 0, 0, a, b, c, df, index, invertedIndexB, invertedIndexC);
hipDeviceSynchronize();
for (size_t j = 0; j < arraySize; j++) {
printf(" %d", invertedIndexB[j]);
}
printf("\n\n");
printf("Norms: \n");
for (size_t j = 0; j < arraySize; j++) {
printf(" %d", invertedIndexB[j]);
}
printf("\n\n");
hipLaunchKernelGGL(( getDocNorm), dim3(1),dim3(numDocs), 0, 0, a,b,c,dn,d1.size());
hipDeviceSynchronize();
//Start Querying
ifstream ifq("querydoc.txt");
string qur;
qur.assign( (istreambuf_iterator<char>(ifq) ),
(istreambuf_iterator<char>() ) );
char qarr[qur.size()+1];
strcpy(qarr,qur.c_str());
vector<char*> vq;
vector<int> tq;
vector<double> tf;
char* query_array = strtok(qarr, "[");
while(query_array) {
vq.push_back(query_array);
query_array = strtok(NULL, "[");
}
for(size_t n = 0; n < vq.size(); ++n)
{
char* subchar_array = strtok(vq[n], ",");
while (subchar_array) {
if (n == 0)
tq.push_back(atoi(subchar_array));
else if (n == 1)
tf.push_back(atof(subchar_array));
subchar_array = strtok(NULL, ",");
}
}
int q_size = tq.size();
int qterm[q_size];
double sum[q_size];
double qtermfreq[tf.size()];
copy(tq.begin(), tq.end(), qterm);
copy(tf.begin(), tf.end(), qtermfreq);
int *qtptr;
double *qfptr, *ds;
double docSums[numDocs];
hipMallocManaged(&qtptr,q_size*sizeof(int));
hipMallocManaged(&qfptr,q_size*sizeof(double));
hipMallocManaged(&ds,sizeof(docSums));
hipMemcpy(qtptr,qterm,q_size *sizeof(int),hipMemcpyHostToDevice);
hipMemcpy(qfptr,qtermfreq,q_size *sizeof(double),hipMemcpyHostToDevice);
hipMemcpy(ds,docSums,sizeof(docSums),hipMemcpyHostToDevice);
double q_norm = 0;
for (size_t j = 0; j < q_size; j++) {
q_norm+=(qtermfreq[j]*qtermfreq[j]);
}
q_norm = sqrt(q_norm);
hipLaunchKernelGGL(( knnTD), dim3(1),dim3(q_size), 0, 0, qtptr,qfptr,invertedIndexB,invertedIndexC,ds,index);
hipDeviceSynchronize();
hipLaunchKernelGGL(( knn), dim3(1),dim3(numDocs), 0, 0, qtptr,qfptr,ds,dn,q_norm);
hipDeviceSynchronize();
/*
printf("\n\nDoc Distances:\n");
for (size_t j = 0; j < numDocs; j++) {
printf(" %d -> %f\n",j+1,ds[j]);
}
*/
int docLabel[numDocs];
int *dl;
hipMallocManaged(&dl,sizeof(docLabel));
hipMemcpy(dl,docLabel,sizeof(docLabel),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( oddEvenSort), dim3(1),dim3(numDocs), 0, 0, ds,dl,numDocs);
hipDeviceSynchronize();
vector<int> nn;
printf("\nK Nearest Neighbors (k=%d): \n", k);
for (size_t j = 0; j < k; j++) {
if (find(nn.begin(), nn.end(), dl[j]) != nn.end()) {
} else {
nn.push_back(dl[j]/10);
}
printf(" %d -> %f -> label = %d\n", dl[j],ds[j],dl[j]/10);
}
int uniqueN[nn.size()];
copy(nn.begin(), nn.end(), uniqueN);
double kCount[nn.size()];
double *dc;
int *u;
hipMallocManaged(&dc,sizeof(kCount));
hipMallocManaged(&u,sizeof(uniqueN));
hipMemcpy(dc,kCount,sizeof(kCount),hipMemcpyHostToDevice);
hipMemcpy(u,uniqueN,sizeof(uniqueN),hipMemcpyHostToDevice);
hipLaunchKernelGGL(( classify), dim3(1),dim3(nn.size()), 0, 0, dl,ds,k,dc,u);
hipDeviceSynchronize();
double max = 0;
int max_i = 0;
for (size_t j = 0; j < nn.size(); j++) {
if(dc[j] > max) {
max = dc[j];
max_i = j;
}
}
printf("\nQuery Document is labelled = %d\n", u[max_i]);
printf("\n");
hipFree(a);
hipFree(b);
hipFree(c);
hipFree(df);
hipFree(index);
hipFree(invertedIndexA);
hipFree(invertedIndexB);
hipFree(invertedIndexC);
hipFree(qtptr);
hipFree(qfptr);
hipFree(ds);
hipFree(dn);
hipFree(dl);
hipFree(dc);
return 0;
}
| bdc17f5a28fe8b2d1096f463c58c35d1ce26d85e.cu | #include <stdio.h>
#include <string>
#include <iostream>
#include <fstream>
#include <vector>
#include <algorithm>
using namespace std;
__global__ void printArray(int *index, int outputsize) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < outputsize) {
printf(" %d", index[i]);
}
}
__global__ void printArray(double *index, int outputsize) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < outputsize) {
printf(" %f", index[i]);
}
}
__global__ void Count_number_of_term(int *A, int *Df) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
int value = A[i] - 1;
atomicAdd(&Df[value], 1);
}
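// The scan below turns the per-term counts in Df[] into posting-list offsets for the
// inverted index: index[t+1] ends up holding the running total Df[0] + ... + Df[t],
// so index[t] is where term t's document list starts (index[0] stays 0). The kernel
// only works for inputs that fit in a single block (the shared array XY is fixed at
// 100 entries), and like Count_number_of_term above it has no `i < n` guard, so the
// launch configuration must not over-cover the input.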
__global__ void Kogge_Stone_scan_kernel(int *df, int *index, int InputSize, int thread_num, int *temp) {
__shared__ int XY[100];
int i = blockIdx.x * blockDim.x + threadIdx.x;
if (i < InputSize) {
XY[threadIdx.x] = df[i];
__syncthreads();
}
for (unsigned int stride = 1; stride < blockDim.x; stride *= 2) {
__syncthreads();
if (threadIdx.x >= stride) {
XY[threadIdx.x] += XY[threadIdx.x - stride];
}
}
if (i < thread_num-1) {
index[i + 1] = XY[threadIdx.x];
}
__syncthreads();
}
__global__ void Create_InvertedIndexA (int *A, int *B, int *Df, int *Index ,int *InvertedIndexA) {
int temp = Index[threadIdx.x] + Df[threadIdx.x];
if (blockIdx.x == 0) {
for (unsigned int i = Index[threadIdx.x]; i < temp; i++) {
__syncthreads();
InvertedIndexA[i] = threadIdx.x + 1;
}
}
}
__global__ void Create_InvertedIndexB(int *A, int *B, double *C, int *Df, int *Index, int *InvertedIndexB, double *InvertedIndexC) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
__syncthreads();
int temp = 0;
temp = Index[B[i]-1];
__syncthreads();
int value = B[i] - 1;
int a = 0;
a=atomicAdd(&Index[value], 1);
InvertedIndexB[a] = A[i];
InvertedIndexC[a] = C[i];
__syncthreads();
}
// KNN Start
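// Overview of the query path implemented by the kernels below:
//  - knnTD walks the posting list of every query term (index[] delimits that term's
//    slice of invertedIndex/norms) and accumulates qnorm[i] * norms[j] into
//    docs[docId], i.e. the dot product between query and document term weights.
//  - knn divides each accumulated dot product by docNorm[doc] * queryNorm, turning
//    it into a cosine similarity (forced to 0 when either norm is 0).
//  - oddEvenSort sorts the similarities in descending order while carrying 1-based
//    document ids in dl[].
//  - classify sums the similarities of the top-k neighbours per candidate label
//    (label = docId / 10) and the host picks the label with the largest sum.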
__global__ void knnTD(int *terms, double *qnorm, int *invertedIndex, double *norms, double *docs, int *index) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
double temp = 0;
for (int j = index[terms[i]]; j < index[terms[i]+1]; j++) {
docs[invertedIndex[j]]+=qnorm[i]*norms[j];
__syncthreads();
}
}
__global__ void knn(int* terms, double *qnorm, double *docs, double *docNorm, double queryNorm) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
if(docNorm[i] == 0 || queryNorm == 0) {
docs[i] = 0;
} else {
docs[i] = docs[i]/(docNorm[i]*queryNorm);
}
}
__global__ void printlist(int *terms, int *invertedIndex, int *index) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
printf("Term -> %d\n", index[terms[i]]);
for (int j = index[terms[i]]; j < index[terms[i]+1]; j++) {
printf("%d -> %d\n", terms[i], invertedIndex[j]);
}
printf("\n");
}
__global__ void getDocNorm(int *docs, int *terms, double *norms, double *dn, int num) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
double temp = 0.0;
for (size_t j = 0; j < num; j++) {
if(docs[j] == i) {
temp+=(norms[j]*norms[j]);
}
}
__syncthreads();
dn[i] = sqrt(temp);
}
__global__ void oddEvenSort(double *data, int *dl, int num_elem) {
int tid = (blockIdx.x*blockDim.x) + threadIdx.x;
int tid_idx;
int offset = 0; //Start off with even, then odd
int num_swaps;
dl[tid] = tid+1;
__syncthreads();
//Calculate the maximum index for a given block
//Last block it is number of elements minus one
//Other blocks to end of block minus one
int tid_idx_max = min((((blockIdx.x + 1)*(blockDim.x * 2)) - 1), (num_elem - 1));
do
{
//Reset number of swaps
num_swaps = 0;
//work out index of data
tid_idx = (tid * 2) + offset;
//If no array or block overrun
if (tid_idx < tid_idx_max) {
//Read values into registers
double d0 = data[tid_idx];
int db0 = dl[tid_idx];
double d1 = data[tid_idx + 1];
int db1 = dl[tid_idx + 1];
//Compare registers
if (d0 < d1) {
//Swap values if needed
data[tid_idx] = d1;
dl[tid_idx] = db1;
data[tid_idx + 1] = d0;
dl[tid_idx + 1] = db0;
//keep track that we did a swap
num_swaps++;
}
}
//Switch from even to odd, or odd to even
if (offset == 0) {
offset = 1;
}
else {
offset = 0;
}
} while (__syncthreads_count(num_swaps) != 0);
}
__global__ void classify(int *dl, double *dn, int k, double *dc, int *u) {
int i = blockIdx.x * blockDim.x + threadIdx.x;
for (size_t j = 0; j < k; j++) {
if(dl[j]/10 == u[i]) {
dc[i] += dn[j];
}
}
}
// KNN end
int main(int argc, char **argv)
{
int k;
printf("%d\n", argc);
if(argc < 2) {
k = 3;
} else {
k = atoi(argv[1]);
}
// Read term-doc pairs
ifstream ifs("result_norm.txt");
string text;
text.assign( (istreambuf_iterator<char>(ifs) ),
(istreambuf_iterator<char>() ) );
char arr[text.size()+1];
strcpy(arr,text.c_str());
vector<char*> v;
vector<int> d1;
vector<int> t1;
vector<double> n1;
char* chars_array = strtok(arr, "[");
while(chars_array) {
v.push_back(chars_array);
chars_array = strtok(NULL, "[");
}
bool firstTerm = true, firstNorm = true;
for(size_t n = 0; n < v.size(); ++n)
{
char* subchar_array = strtok(v[n], ",");
while (subchar_array) {
if (n == 0) {
d1.push_back(atoi(subchar_array));
} else if (n == 1) {
if (firstTerm){
d1.pop_back();
firstTerm = false;
}
t1.push_back(atoi(subchar_array));
} else if (n == 2) {
if (firstNorm){
t1.pop_back();
firstNorm = false;
}
if (n1.size() == d1.size())
break;
n1.push_back(atof(subchar_array));
}
subchar_array = strtok(NULL, ",");
}
}
int d[d1.size()];
int t[t1.size()];
double n[n1.size()];
copy(d1.begin(), d1.end(), d);
copy(t1.begin(), t1.end(), t);
copy(n1.begin(), n1.end(), n);
/*
for (size_t i = 0; i < t1.size(); i++) {
printf("%d -> [%d,%d,%f]\n",i,d[i],t[i],n[i]);
}
*/
// Begin InvertedIndex algorithm
int numDocs = d[d1.size()-1];
const int arraySize = sizeof(d)/sizeof(int);
printf("ArraySize: %d\n", arraySize);
const int number_term = 7;
int Df[number_term] = { 0 };
int Index[number_term] = { 0 };
vector<int> IA(arraySize,0);
vector<int> IB(arraySize,0);
vector<double> IC(arraySize,0);
int InvertedIndexA[arraySize];//output
int InvertedIndexB[arraySize];//output
double InvertedIndexC[arraySize];//output
copy(IA.begin(),IA.end(),InvertedIndexA);
copy(IB.begin(),IB.end(),InvertedIndexB);
copy(IC.begin(),IC.end(),InvertedIndexC);
printf("A: %d\n", sizeof(InvertedIndexA)/sizeof(int));
int thread_num = d[arraySize - 1];
int blocks = (arraySize / thread_num) + (arraySize % thread_num != 0 ? 1 : 0);
printf("blocks = %d\n", blocks);
int *a, *b, *df, *index, *invertedIndexA, *invertedIndexB;
double *c, *invertedIndexC, *dn;
double docNorms[numDocs];
cudaMallocManaged(&a, sizeof(d));
cudaMallocManaged(&b, sizeof(t));
cudaMallocManaged(&c, sizeof(n));
cudaMallocManaged(&df, sizeof(Df));
cudaMallocManaged(&index, sizeof(Index));
cudaMallocManaged(&invertedIndexA, sizeof(InvertedIndexA));
cudaMallocManaged(&invertedIndexB, sizeof(InvertedIndexB));
cudaMallocManaged(&invertedIndexC, sizeof(InvertedIndexC));
cudaMallocManaged(&dn,sizeof(docNorms));
cudaMemcpy(a, d, sizeof(d), cudaMemcpyHostToDevice);
cudaMemcpy(b, t, sizeof(t), cudaMemcpyHostToDevice);
cudaMemcpy(c, n, sizeof(n), cudaMemcpyHostToDevice);
cudaMemcpy(df, Df, sizeof(Df), cudaMemcpyHostToDevice);
cudaMemcpy(index, Index, sizeof(Index), cudaMemcpyHostToDevice);
cudaMemcpy(invertedIndexA, InvertedIndexA, sizeof(InvertedIndexA), cudaMemcpyHostToDevice);
cudaMemcpy(invertedIndexB, InvertedIndexB, sizeof(InvertedIndexB), cudaMemcpyHostToDevice);
cudaMemcpy(invertedIndexC, InvertedIndexC, sizeof(InvertedIndexC), cudaMemcpyHostToDevice);
cudaMemcpy(dn,docNorms,sizeof(docNorms),cudaMemcpyHostToDevice);
int Temp[number_term] = { 0 };int *temp;
cudaMallocManaged(&temp, sizeof(Temp));
cudaMemcpy(temp, Temp, sizeof(Temp), cudaMemcpyHostToDevice);
printf("Initial Array:\n");
printf("d:");
printArray <<<1, arraySize>>> (a, sizeof(d) / sizeof(int));
cudaDeviceSynchronize();
printf("\n");
printf("t:");
printArray <<<1, arraySize >>> (b, sizeof(t) / sizeof(int));
cudaDeviceSynchronize();
printf("\n");
printf("Count_number_of_term: \n");
Count_number_of_term <<<blocks, thread_num>>> (b,df);
cudaDeviceSynchronize();
printArray <<<1, thread_num>>> (df, sizeof(Df) / sizeof(int));
cudaDeviceSynchronize();
printf("\n");
printf("Execute the prefix sum by Kogge Stone:\n");
Kogge_Stone_scan_kernel <<<blocks, thread_num>>> (df, index, arraySize, thread_num, temp);
cudaDeviceSynchronize();
//printf("Input count number array to the Kogge Stone:\n");
printArray <<<1, arraySize >>> (index, sizeof(Index) / sizeof(int));
cudaDeviceSynchronize();
printf("\n");
printf("InvertedIndex Array:\n");
Create_InvertedIndexA <<<1, thread_num >>> (a, b, df, index, invertedIndexA);
cudaDeviceSynchronize();
printf("Terms: \n");
for (size_t j = 0; j < arraySize; j++) {
printf(" %d", invertedIndexA[j]);
}
printf("\n\n");
printf("Documents: \n");
Create_InvertedIndexB <<<blocks, thread_num >>> (a, b, c, df, index, invertedIndexB, invertedIndexC);
cudaDeviceSynchronize();
for (size_t j = 0; j < arraySize; j++) {
printf(" %d", invertedIndexB[j]);
}
printf("\n\n");
printf("Norms: \n");
for (size_t j = 0; j < arraySize; j++) {
printf(" %d", invertedIndexB[j]);
}
printf("\n\n");
getDocNorm<<<1,numDocs>>>(a,b,c,dn,d1.size());
cudaDeviceSynchronize();
//Start Querying
ifstream ifq("querydoc.txt");
string qur;
qur.assign( (istreambuf_iterator<char>(ifq) ),
(istreambuf_iterator<char>() ) );
char qarr[qur.size()+1];
strcpy(qarr,qur.c_str());
vector<char*> vq;
vector<int> tq;
vector<double> tf;
char* query_array = strtok(qarr, "[");
while(query_array) {
vq.push_back(query_array);
query_array = strtok(NULL, "[");
}
for(size_t n = 0; n < vq.size(); ++n)
{
char* subchar_array = strtok(vq[n], ",");
while (subchar_array) {
if (n == 0)
tq.push_back(atoi(subchar_array));
else if (n == 1)
tf.push_back(atof(subchar_array));
subchar_array = strtok(NULL, ",");
}
}
int q_size = tq.size();
int qterm[q_size];
double sum[q_size];
double qtermfreq[tf.size()];
copy(tq.begin(), tq.end(), qterm);
copy(tf.begin(), tf.end(), qtermfreq);
int *qtptr;
double *qfptr, *ds;
double docSums[numDocs];
cudaMallocManaged(&qtptr,q_size*sizeof(int));
cudaMallocManaged(&qfptr,q_size*sizeof(double));
cudaMallocManaged(&ds,sizeof(docSums));
cudaMemcpy(qtptr,qterm,q_size *sizeof(int),cudaMemcpyHostToDevice);
cudaMemcpy(qfptr,qtermfreq,q_size *sizeof(double),cudaMemcpyHostToDevice);
cudaMemcpy(ds,docSums,sizeof(docSums),cudaMemcpyHostToDevice);
double q_norm = 0;
for (size_t j = 0; j < q_size; j++) {
q_norm+=(qtermfreq[j]*qtermfreq[j]);
}
q_norm = sqrt(q_norm);
knnTD<<<1,q_size>>>(qtptr,qfptr,invertedIndexB,invertedIndexC,ds,index);
cudaDeviceSynchronize();
knn<<<1,numDocs>>>(qtptr,qfptr,ds,dn,q_norm);
cudaDeviceSynchronize();
/*
printf("\n\nDoc Distances:\n");
for (size_t j = 0; j < numDocs; j++) {
printf(" %d -> %f\n",j+1,ds[j]);
}
*/
int docLabel[numDocs];
int *dl;
cudaMallocManaged(&dl,sizeof(docLabel));
cudaMemcpy(dl,docLabel,sizeof(docLabel),cudaMemcpyHostToDevice);
oddEvenSort<<<1,numDocs>>>(ds,dl,numDocs);
cudaDeviceSynchronize();
vector<int> nn;
printf("\nK Nearest Neighbors (k=%d): \n", k);
for (size_t j = 0; j < k; j++) {
if (find(nn.begin(), nn.end(), dl[j]) != nn.end()) {
} else {
nn.push_back(dl[j]/10);
}
printf(" %d -> %f -> label = %d\n", dl[j],ds[j],dl[j]/10);
}
int uniqueN[nn.size()];
copy(nn.begin(), nn.end(), uniqueN);
double kCount[nn.size()];
double *dc;
int *u;
cudaMallocManaged(&dc,sizeof(kCount));
cudaMallocManaged(&u,sizeof(uniqueN));
cudaMemcpy(dc,kCount,sizeof(kCount),cudaMemcpyHostToDevice);
cudaMemcpy(u,uniqueN,sizeof(uniqueN),cudaMemcpyHostToDevice);
classify<<<1,nn.size()>>>(dl,ds,k,dc,u);
cudaDeviceSynchronize();
double max = 0;
int max_i = 0;
for (size_t j = 0; j < nn.size(); j++) {
if(dc[j] > max) {
max = dc[j];
max_i = j;
}
}
printf("\nQuery Document is labelled = %d\n", u[max_i]);
printf("\n");
cudaFree(a);
cudaFree(b);
cudaFree(c);
cudaFree(df);
cudaFree(index);
cudaFree(invertedIndexA);
cudaFree(invertedIndexB);
cudaFree(invertedIndexC);
cudaFree(qtptr);
cudaFree(qfptr);
cudaFree(ds);
cudaFree(dn);
cudaFree(dl);
cudaFree(dc);
return 0;
}
|
f738029f09123d726939d4ab8f5b6e774a33f723.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__device__ void funny();
__declspec(dllexport) __global__ void doKernel2(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = c[i] + b[i] + a[i];
funny();
}
// Can declare, but cannot link in "hw".
__declspec(dllexport) __device__ void fun(int *c, const int *a, const int *b)
{
c[0] = c[0] + b[0] + a[0];
}
__global__ void doKernel3(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = c[i] + b[i] + a[i];
}
| f738029f09123d726939d4ab8f5b6e774a33f723.cu |
#include "cuda_runtime.h"
#include "device_launch_parameters.h"
#include <stdio.h>
__device__ void funny();
__declspec(dllexport) __global__ void doKernel2(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = c[i] + b[i] + a[i];
funny();
}
// Can declare, but cannot link in "hw".
__declspec(dllexport) __device__ void fun(int *c, const int *a, const int *b)
{
c[0] = c[0] + b[0] + a[0];
}
__global__ void doKernel3(int *c, const int *a, const int *b)
{
int i = threadIdx.x;
c[i] = c[i] + b[i] + a[i];
}
|
514479507d91d82c843defed9bfc17e5882c6251.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"
__global__ void CudaKernel_BatchResize_GRAY2GRAY( int src_width, unsigned char* src_image, int num_rects, int* rects, int dst_width, int dst_height, float* dst_ptr )
{
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int dst_image_size = dst_width * dst_height;
if( num_rects*dst_image_size <= gid ){
return;
}
const int image_index = (int)(gid / dst_image_size);
const int pixel_index = gid % dst_image_size;
float scale_x = (float)(rects[image_index*4 + 2])/dst_width;
float fx = (float)(((pixel_index % dst_width)+0.5f)*scale_x - 0.5);
int coor_x_in_rect = floor(fx);
fx = 1.0f - (fx - (float)coor_x_in_rect);
float scale_y = (float)(rects[image_index*4 + 3])/dst_height;
float fy = (float)(((pixel_index / dst_width)+0.5f)*scale_y - 0.5);
int coor_y_in_rect = floor(fy);
fy = 1.0f - (fy - (float)coor_y_in_rect);
int src_x = rects[image_index*4 + 0];
int src_y = rects[image_index*4 + 1];
float value = 0.;
value += (float)src_image[src_width*(src_y + coor_y_in_rect + 0) + (src_x + coor_x_in_rect + 0)] * fx * fy;
value += (float)src_image[src_width*(src_y + coor_y_in_rect + 0) + (src_x + coor_x_in_rect + 1)] * (1.0f - fx)*fy;
value += (float)src_image[src_width*(src_y + coor_y_in_rect + 1) + (src_x + coor_x_in_rect + 0)] * fx*(1.0f - fy);
value += (float)src_image[src_width*(src_y + coor_y_in_rect + 1) + (src_x + coor_x_in_rect + 1)] * (1.0f - fx)*(1.0f - fy);
dst_ptr[blockIdx.x * blockDim.x + threadIdx.x] = value / 255.f;
} | 514479507d91d82c843defed9bfc17e5882c6251.cu | #include "includes.h"
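// Bilinearly resizes each rectangular ROI of a grayscale source image (rects[i] =
// {x, y, w, h}) into a dst_width x dst_height float patch normalised to [0, 1].
// One thread writes one output pixel; note that after the `1.0f - ...` flip, fx and
// fy are the weights of the sample at (coor_x_in_rect, coor_y_in_rect), which is why
// the four taps below are weighted fx*fy, (1-fx)*fy, fx*(1-fy) and (1-fx)*(1-fy).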
__global__ void CudaKernel_BatchResize_GRAY2GRAY( int src_width, unsigned char* src_image, int num_rects, int* rects, int dst_width, int dst_height, float* dst_ptr )
{
const int gid = blockIdx.x * blockDim.x + threadIdx.x;
const int dst_image_size = dst_width * dst_height;
if( num_rects*dst_image_size <= gid ){
return;
}
const int image_index = (int)(gid / dst_image_size);
const int pixel_index = gid % dst_image_size;
float scale_x = (float)(rects[image_index*4 + 2])/dst_width;
float fx = (float)(((pixel_index % dst_width)+0.5f)*scale_x - 0.5);
int coor_x_in_rect = floor(fx);
fx = 1.0f - (fx - (float)coor_x_in_rect);
float scale_y = (float)(rects[image_index*4 + 3])/dst_height;
float fy = (float)(((pixel_index / dst_width)+0.5f)*scale_y - 0.5);
int coor_y_in_rect = floor(fy);
fy = 1.0f - (fy - (float)coor_y_in_rect);
int src_x = rects[image_index*4 + 0];
int src_y = rects[image_index*4 + 1];
float value = 0.;
value += (float)src_image[src_width*(src_y + coor_y_in_rect + 0) + (src_x + coor_x_in_rect + 0)] * fx * fy;
value += (float)src_image[src_width*(src_y + coor_y_in_rect + 0) + (src_x + coor_x_in_rect + 1)] * (1.0f - fx)*fy;
value += (float)src_image[src_width*(src_y + coor_y_in_rect + 1) + (src_x + coor_x_in_rect + 0)] * fx*(1.0f - fy);
value += (float)src_image[src_width*(src_y + coor_y_in_rect + 1) + (src_x + coor_x_in_rect + 1)] * (1.0f - fx)*(1.0f - fy);
dst_ptr[blockIdx.x * blockDim.x + threadIdx.x] = value / 255.f;
} |
a4d809048f741f8b1c36759b5f8f606a6ed6b6b2.hip | // !!! This is a file automatically generated by hipify!!!
/*
Project 3
Summer 2018
Brian Pinson
Karshan Arjun
Mark Tushemereiwe
*/
/** *******************************************************************
* File name : quadtreeGPU.cu
* Construct the quadtree on the GPU. The version with all edited functions.
*
** *******************************************************************/
/**<************************# Includes ********************************/
#include<stdio.h>
#include<stdlib.h>
#include"MemoryManager.h"
#include<unistd.h>
#include<sys/time.h>
#include <stdbool.h>
#include<stdlib.h>
#include<cstdlib>
#include <hip/hip_runtime.h>
#include <math.h>
#include <hiprand/hiprand.h>
#include <hiprand/hiprand_kernel.h>
#include<time.h>
#include<string.h>
#include <iostream>
#include <cmath>
#include <limits>
#include <float.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#ifdef __CDT_PARSER__
/**<************************# Defines *********************************/
#define __host__
#define __shared__
#define CUDA_KERNEL_DIM(...)
#else
#define CUDA_KERNEL_DIM(...) <<< __VA_ARGS__ >>>
#endif
#define BUILD_FULL 1
#define BUILD_ADAPTIVE 2
#define MODE_RANDOM 1
#define MODE_FILE 2
#define TRUE 1
#define FALSE 0
#define pMax 32
#ifndef RANGE
#define RANGE 24000
//#define RANGE 1024
#endif
#define BLOCK_SIZE 1024
#define CUDA_BLOCK_SIZE 64
#define STACK_MAX 36
#define BUFFER_SIZE 1024
#define Leaf_SIZE 1024
#define INSERT_BLOCK_SIZE 1024
#define PAGE_SIZE 40
#define NB_PAGE_SIZE 50
#define LEAF_BUFFER_SIZE 1024
#define MAX_LEAF_CAPACITY 5120
__device__ __constant__ int bucket_size;
__device__ __constant__ int max_levels = 10;
__constant__ long long PDH_acnt_CUDA; // constant memory number of points
__constant__ double PDH_res_CUDA; // constant memory width size
extern __shared__ double sharedMemory[]; // shared memory to contain points
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true)
{
if (code != hipSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//typedef int POINTID;
//typedef int NODEID;
//typedef int BUFFID;
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/* helps keep track of tree child nodes*/
struct tree_path
{
NODEID child[4];
};
/*
int numLevels = 10;
int maxNodes=349525;
int maxLeaves=262144;
int maxLeafParent=65536;
//level 8
int maxNodes=21845;
int maxLeaves=16384;
int maxLeafParent=4096;
*/
/**<***************** Global variables ****************************/
int pointMode = MODE_RANDOM;
char *inputPointFileName;
char *outputTreeFileName;
int rangeSize = RANGE;
//int bucketSize = 512;
int bucketSize = 1024;
//int numPoints = 8192000;
//int numPoints = 409600;
int numPoints = 16384000;
int numLevels = 10;
int maxNodes = 349525;
int maxLeaves = 262144;
int maxLeafParent = 65536;
int numSearches = 10;
int printTree = 1;
int outputTree = 0;
int quadTreeMode = BUILD_FULL;
//int quadTreeMode = BUILD_ADAPTIVE;
//int numPolygon = 1099120;
int pointRangeX = RANGE;
int pointRangeY = RANGE;
int completeIndex = 0;
int NotIndex = 0;
int PartialIndex = 0;
int arraysize = 100;
int globalLevel = 0;
int globalpoint = 0;
/**<***************** enums ******************************/
//enum {
// TYPE_NONE = 0, TYPE_ROOT, TYPE_LINK, TYPE_LEAF, TYPE_INV
//};
//
//enum {
// FullyOverlapped = 0, PartiallyOverlapped
//};
//for tree construction
int *d_node_counter;
int * d_split_node;
int * d_node_id;
int * d_level;
int* d_point_node;
__device__ unsigned int d_node_allocate = 0;
__device__ unsigned int d_point_allocate = 0;
//define constant
//__device__ unsigned int d_max_level= 0;
unsigned int h_node_allocate = 0;
unsigned int h_point_allocate = 0;
struct buffer {
//int id;
int leafId;
int numberOfQueries;
unsigned long int queries[BUFFER_SIZE];
};
typedef struct LEAF_BUFFER {
// Array of points
unsigned long int queryList[LEAF_BUFFER_SIZE];
//unsigned int querytCount;
//unsigned long int nextBufferId;
} LEAF_BUFFER;
struct Output {
unsigned long long int offset[7];
int page_num;
}Output;
float *d_query_POINT_x;
float *d_query_POINT_y;
int *d_query_POINT_id;
float2 *d_positions;
unsigned long long int *leaf_m_address;
int* d_POINT_nodeid;
//for output
struct Output *d_output;
struct Output *d_output_nonBuffer;
struct Output *h_output;
__device__ unsigned int d_leaves_allocate = 0;
__device__ unsigned int d_leaf_blocks = 0;
int* d_leave_list;
__device__ int d_zeros = 0;
unsigned int h_zeros = 0;
//for saving the intersecting leaves
int *d_intersecting_leave_nodes; //save intersecting leave nodes
int *d_intersecting_leave_count; //count the intersection
__device__ int d_counter_one = 0;
unsigned int h_counter_one = 0;
__device__ int d_split_array_zero = 0;
unsigned int h_split_array_zero = 0;
__global__ void setRootNodeKernel(float xPos, float yPos, int *d_node_counter, int *d_split_node, int *d_level, float2 *d_positions, int numberOfPoints) {
d_node_counter[0] = numberOfPoints;
d_split_node[0] = 1;
d_positions[0].x = xPos;
d_positions[0].y = yPos;
d_level[0] = 0;
}
//get direction
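// Quadrant/child numbering used throughout this file: 0 = NW, 1 = NE, 2 = SW, 3 = SE,
// and -1 means the point falls outside the node. (posX, posY) is the node's lower-left
// corner, and the width/height passed in are the half-extents of the node, i.e. the
// size of one child quadrant.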
__device__ int getNodeDirection(float posX, float posY, float width, float height, float x, float y) {
if ((x >= posX) && (x < posX + width) && (y >= posY + height)
&& (y < posY + height + height)) {
return 0;
}
else if ((x >= posX + width) && (x < posX + width + width) && (y >= posY + height)
&& (y < posY + height + height)) {
return 1;
}
else if ((x >= posX) && (x < posX + width) && (y >= posY)
&& (y < posY + height)) {
return 2;
}
else if ((x >= posX + width) && (x < posX + width + width) && (y >= posY)
&& (y < posY + height)) {
return 3;
}
else {
return -1;
}
}
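// One pass of the level-by-level tree build: each launch pushes every point whose
// current node (d_point_node[tid]) has been marked for splitting down exactly one
// level. The point's new node index in the implicit array is node_Id*4 + direction + 1;
// its counter is bumped atomically, and the thread that pushes the count past
// bucket_size marks that child for splitting on the next pass and records the child's
// lower-left corner in d_positions.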
__global__ void countThePointsInPositions(float width, float height, int level, float* d_queries_x, float* d_queries_y, int *d_node_counter, int *d_split_node, int *d_level, int numberOfthreads, int blocks_num, float2 *d_positions, int *d_point_node) {
const unsigned long long int tid = threadIdx.x + (blockIdx.x*blockDim.x);
if (tid < numberOfthreads) {
register float x = d_queries_x[tid];
register float y = d_queries_y[tid];
register int myCount = 0;
register int direction = -1;
register int node_Id = d_point_node[tid];
register float posX = d_positions[node_Id].x;
register float posY = d_positions[node_Id].y;
register int mem_position;
if (d_split_node[node_Id] == 1) {
direction = getNodeDirection(posX, posY, width, height, x, y);
if (direction != -1) {
mem_position = (((node_Id * 4) + direction) + 1);
d_point_node[tid] = mem_position;
// if (tid ==0){
// printf("x:%f, y: %f , direction:%i, node_id:%i, dir:%i , xpos:%f, ypos:%f \n", x, y, direction, node_Id, mem_position, posX, posY);
// }
if ((d_split_node[mem_position] == 0 || (level == max_levels))) {
//&& d_split_node[mem_position]==0
myCount = atomicAdd(&d_node_counter[mem_position], 1);
if (myCount == bucket_size && (level < max_levels)) {
d_split_node[mem_position] = 1;
d_level[mem_position] = level;
// float width = pWidth / 2.00;
// float height = pHeight / 2.00;
//
switch (direction) {
case 0: // NW
posX = posX;
posY = posY + height;
d_positions[mem_position].x = posX;
d_positions[mem_position].y = posY;
break;
case 1: // NE
posX = posX + width;
posY = posY + height;
d_positions[mem_position].x = posX;
d_positions[mem_position].y = posY;
break;
case 2: // SW
posX = posX;
posY = posY;
d_positions[mem_position].x = posX;
d_positions[mem_position].y = posY;
break;
case 3: // SE
posX = posX + width;
posY = posY;
d_positions[mem_position].x = posX;
d_positions[mem_position].y = posY;
break;
}
// printf("tid: %li, node id:%i, xpos:%f, ypos:%f, dplit:%i\n", tid, mem_position, posX, posY, d_split_node[mem_position]);
}
}
}
}
}
__syncthreads();
}
__device__ inline void device_setNode(NODEID nodeid, float x, float y, float w, float h, int type, int level, int parentIndex, NODE* d_NODE, int open) {
// Get memory for node.
// Set the 5 parameters.
d_NODE[nodeid].index = nodeid;
d_NODE[nodeid].posX = x;
d_NODE[nodeid].posY = y;
d_NODE[nodeid].width = w;
d_NODE[nodeid].height = h;
d_NODE[nodeid].level = level;
// Reset all of the tracking values.
int i;
for (i = 0; i < 4; i++)
{
d_NODE[nodeid].child[i] = -1;
//node->count[i] = 0;
}
d_NODE[nodeid].total = 0;
//node->index = 0;
//node->offset = 0;
d_NODE[nodeid].open = open;
d_NODE[nodeid].type = type;
d_NODE[nodeid].pBuffer = -1;
d_NODE[nodeid].parent_index = parentIndex;
d_NODE[nodeid].leafBufferStart = -1;
d_NODE[nodeid].totalRegisterQuery = 0;
//d_NODE[nodeid].newCount=0;
}
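// The tree is first built as an implicit full quadtree in flat arrays: the root is
// index 0 and child c (0 = NW, 1 = NE, 2 = SW, 3 = SE) of node i lives at 4*i + c + 1.
// getDirection() below inverts that mapping from an array index (index mod 4 -> quadrant),
// and the parent of index i is recovered as (i - direction - 1) / 4. For example, the
// NE child (c = 1) of node 3 sits at 4*3 + 1 + 1 = 14, and (14 - 1 - 1) / 4 = 3.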
__device__ inline int getDirection(unsigned long long int tid) {
int direction = (tid % 4);
int actualDirection;
switch (direction) {
case 0:
//child SE dir =3
actualDirection = 3;
break;
case 1:
//child NW dir =0
actualDirection = 0;
break;
case 2:
//child NE dir=1
actualDirection = 1;
break;
case 3:
//child SW dir =2
actualDirection = 2;
break;
}
return actualDirection;
}
__global__ void createRootNodeKernel(float posX, float posY, float pWidth, float pHeight, struct NODE* d_NODE, int *d_node_id) {
register int myindex = 0;
myindex = atomicAdd(&d_node_allocate, 1);
d_node_id[0] = myindex;
device_setNode(myindex, posX, posY, pWidth, pHeight, TYPE_ROOT, 0, -1, d_NODE, false);
}
__global__ void createParentNodesKernel(float posX, float posY, float pWidth, float pHeight, struct NODE* d_NODE, int *d_node_counter, int *d_split_node, int maxNodes, int *d_node_id, int *d_level, float2 *d_positions) {
const unsigned long long int tid = threadIdx.x + (blockIdx.x*blockDim.x);
if (tid < maxNodes && d_node_counter[tid] != 0 && tid != 0) {
register int myindex = 0;
myindex = atomicAdd(&d_node_allocate, 1);
d_node_id[tid] = myindex;
// if (tid == 0){
//
// device_setNode(myindex, posX, posY, pWidth, pHeight, TYPE_ROOT, 0, 0, d_NODE, false);
// //printf("my index is:%i \n", myindex);
// }
// else {
register int direction = getDirection(tid);
register int parent;
parent = (tid - direction - 1) / 4;
register int level;
register float xPos;
register float yPos;
register int type;
register float width;
register float height;
register int open;
// register int total;
if (d_split_node[tid] == 1) {
//this is a link node
level = d_level[tid];
xPos = d_positions[tid].x;
yPos = d_positions[tid].y;
type = TYPE_LINK;
width = pWidth / (float)(pow((float)2, (float)level));
height = pHeight / (float)(pow((float)2, (float)level));
open = FALSE;
// total= d_node_counter[tid];
}
else {
//this is a leaf node
level = d_level[parent] + 1;
type = TYPE_LEAF;
xPos = d_positions[parent].x;
yPos = d_positions[parent].y;
width = pWidth / (float)(pow((float)2, (float)level));
height = pHeight / (float)(pow((float)2, (float)level));
open = TRUE;
// total =0;
switch (direction) {
case 0:
//child SE
xPos = xPos;
yPos = yPos + height;
break;
case 1:
//child NW
xPos = xPos + width;
yPos = yPos + height;
break;
case 2:
//child NE
xPos = xPos;
yPos = yPos;
break;
case 3:
//child SW
xPos = xPos + width;
yPos = yPos;
break;
}
}
// if (tid==1 ){
// printf("my index is:%i , direction is: %i , parent is:%i, total:%i, open:%i, level:%i, xpos:%f, ypos:%f, width:%f, height:%f, type:%i \n", myindex, direction , parent, d_node_counter[tid], open, level, xPos, yPos, width, height, type );
// }
device_setNode(myindex, xPos, yPos, width, height, type, level, parent, d_NODE, open);
// }
}
__syncthreads();
}
//__global__ void finalNodesSetUpKernel( struct NODE* d_NODE, int *d_node_counter, int *d_split_node, int maxNodes, int *d_node_id, int *d_leave_list, unsigned long long int *Address, unsigned long long int *d_leaf_buffer_list){
__global__ void finalNodesSetUpKernel(struct NODE* d_NODE, int *d_node_counter, int *d_split_node, int maxNodes, int *d_node_id, int *d_leave_list, unsigned long long int *Address) {
const unsigned long long int tid = threadIdx.x + (blockIdx.x*blockDim.x);
if (tid < maxNodes) {
if (d_node_counter[tid] != 0 && tid != 0) {
register int nodeid = d_node_id[tid];
register int parentNodeId = d_node_id[d_NODE[nodeid].parent_index];
d_NODE[nodeid].parent_index = parentNodeId;
register int direction = getDirection(tid);
d_NODE[parentNodeId].child[direction] = nodeid;
if (d_split_node[tid] != 1) {
//this is a leaf
register int myindex = 0;
myindex = atomicAdd(&d_point_allocate, d_node_counter[tid]);
d_NODE[nodeid].pBuffer = myindex;
myindex = atomicAdd(&d_leaves_allocate, 1);
d_leave_list[myindex] = nodeid;
unsigned long long int offsetAddress = atomicAdd(Address, BUFFER_SIZE);
d_NODE[nodeid].leafBufferStart = offsetAddress;
d_NODE[nodeid].totalRegisterQuery = 0;
}
}
}
__syncthreads();
}
//__device__ NODEID findQuadTreeNodeCuda(NODEID nParentid, float x, float y, NODE* d_NODE, unsigned long long int tid ) {
__device__ NODEID findQuadTreeNodeCuda(NODEID nParentid, float x, float y, NODE* d_NODE) {
register float posX, posY;
register int index;
if (nParentid == -1)
return nParentid;
register NODE nParent = d_NODE[nParentid];
if (nParent.type == TYPE_LEAF)
return nParentid;
// Get the point.
// Child width and height
register float width;
register float height;
// if (tid ==0){
// printf("nparent is: %i , with:%f, height:%f, child0:%i, child1:%i, child2:%i, child3:%i\n", nParentid, nParent.width , nParent.height, nParent.child[0], nParent.child[1], nParent.child[2], nParent.child[3]);
// }
while (nParent.type != TYPE_LEAF) {
width = nParent.width / 2.00;
height = nParent.height / 2.00;
for (index = 0; index < 4; index++) {
switch (index) {
case 0: // NW
posX = nParent.posX;
posY = nParent.posY + height;
if ((x >= posX) && (x < posX + width) && (y >= posY)
&& (y < posY + height)) {
nParentid = nParent.child[0];
}
break;
case 1: // NE
posX = nParent.posX + width;
posY = nParent.posY + height;
if ((x >= posX) && (x < posX + width) && (y >= posY)
&& (y < posY + height)) {
nParentid = nParent.child[1];
}
break;
case 2: // SW
posX = nParent.posX;
posY = nParent.posY;
if ((x >= posX) && (x < posX + width) && (y >= posY)
&& (y < posY + height)) {
nParentid = nParent.child[2];
}
break;
case 3: // SE
posX = nParent.posX + width;
posY = nParent.posY;
if ((x >= posX) && (x < posX + width) && (y >= posY)
&& (y < posY + height)) {
nParentid = nParent.child[3];
}
break;
}
}
if (nParentid == -1)
return nParentid;
nParent = d_NODE[nParentid];
// if (tid ==0){
// printf("nparent is: %i \n", nParentid);
// }
}
return nParentid;
}
__global__ void insertIntoLeafNodes(int *d_node_id, float* d_query_POINT_x, float* d_query_POINT_y, int *d_query_POINT_id, NODE* d_NODE, float *d_POINT_x, float *d_POINT_y, int *d_POINT_id, int *d_point_node, int numPoints, NODEID *d_POINT_nodeid) {
const unsigned long long int tid = threadIdx.x + (blockIdx.x*blockDim.x);
if (tid < numPoints) {
register int myindex;
register NODEID leaf = d_node_id[d_point_node[tid]];
register float x = d_query_POINT_x[tid];
register float y = d_query_POINT_y[tid];
register int index = d_query_POINT_id[tid];
if (d_NODE[leaf].type == TYPE_LEAF) {
myindex = atomicAdd(&d_NODE[leaf].total, 1);
if ((myindex <bucket_size && d_NODE[leaf].pBuffer != -1) || (myindex >= bucket_size && d_NODE[leaf].level == max_levels) && d_NODE[leaf].pBuffer != -1) {
d_POINT_id[(d_NODE[leaf].pBuffer + myindex)] = index;
d_POINT_x[(d_NODE[leaf].pBuffer + myindex)] = x;
d_POINT_y[(d_NODE[leaf].pBuffer + myindex)] = y;
d_POINT_nodeid[(d_NODE[leaf].pBuffer + myindex)] = leaf;
}
}
}
__syncthreads();
}
/***************************************** end of building the tree ***************************/
/*
search on GPU
*/
//non Buffer range search
__global__ void countTheNumberOfZeros(int *d_split_node, int startLevelNode, int numberOfActiveThreads) {
const unsigned long long int tid = threadIdx.x + (blockIdx.x*blockDim.x);
if (tid < numberOfActiveThreads) {
if (d_split_node[startLevelNode + tid] == 0) {
atomicAdd(&d_zeros, 1);
}
}
__syncthreads();
}
//count the number of non-empty nodes in the tree
__global__ void countTheOnesInCounterArray(int *d_node_counter, int maxNodes) {
const unsigned long long int tid = threadIdx.x + (blockIdx.x*blockDim.x);
if (tid < maxNodes) {
if (d_node_counter[tid] != 0) {
atomicAdd(&d_counter_one, 1);
}
}
__syncthreads();
}
//count the number of non link nodes in the tree
__global__ void countTheNonLeafNodes(int *d_split_node, int maxNodes) {
const unsigned long long int tid = threadIdx.x + (blockIdx.x*blockDim.x);
if (tid < maxNodes) {
if (d_split_node[tid] == 0) {
atomicAdd(&d_split_array_zero, 1);
}
}
__syncthreads();
}
/*
Rebuilds the Quadtree to make it work
*/
__global__ void CUDA_RebuildTree(NODE * d_NODE, int num_of_nodes, tree_path *tree)
{
int i = 0;
for (i = 0; i < num_of_nodes; i++)
{
int j = 0;
// printf("node %i", d_NODE[i].index);
for (j = 0; j < 4; j++)
{
tree[i].child[j] = d_NODE[i].child[j];
// printf(" child %i", tree[i].child[j]);
}
// printf(" parent %i\n", d_NODE[i].parent_index);
}
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
CPU Calculator
&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/* descriptors for single atom in the tree */
typedef struct atomdesc
{
double x_pos;
double y_pos;
} atom;
typedef struct hist_entry
{
long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * CPU_histogram; /* list of all buckets in the histogram */
bucket * GPU_histogram; /* list of all buckets in the histogram */
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
/* These are for an old way of tracking time */
/*
distance of two points in the atom_list
*/
double p2p_distance(int ind1, int ind2) {
double x1 = atom_list[ind1].x_pos;
double x2 = atom_list[ind2].x_pos;
double y1 = atom_list[ind1].y_pos;
double y2 = atom_list[ind2].y_pos;
return sqrt((x1 - x2)*(x1 - x2) + (y1 - y2)*(y1 - y2));
}
/*
brute-force SDH solution in a single CPU thread
*/
int PDH_baseline() {
int i, j, h_pos;
double dist;
for (i = 0; i < PDH_acnt; i++) {
for (j = i + 1; j < PDH_acnt; j++) {
dist = p2p_distance(i, j);
h_pos = (int)(dist / PDH_res);
CPU_histogram[h_pos].d_cnt++;
}
}
return 0;
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time()
{
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff = endTime.tv_usec - startTime.tv_usec;
if (usec_diff < 0) {
sec_diff--;
usec_diff += 1000000;
}
printf("\n\nRunning time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff / 1000000.0);
}
/*
print the counts in all buckets of the histogram
*/
void output_histogram(bucket * input_histogram)
{
int i;
long long total_cnt = 0;
for (i = 0; i< num_buckets; i++) {
if (i % 5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", input_histogram[i].d_cnt);
total_cnt += input_histogram[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if (i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
/* Prints difference between buckets by using an altered histogram printing function */
void histogram_comparison(bucket *input1, bucket *input2)
{
printf("Difference Between CPU and CUDA histograms: \n");
int i;
long long total_cnt = 0;
for (i = 0; i< num_buckets; i++) {
if (i % 5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", input2[i].d_cnt - input1[i].d_cnt);
total_cnt += input1[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if (i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
/* returns distance */
__device__ double CUDA_distance_calculator(double x1, double y1, double x2, double y2)
{
return sqrt((x1 - x2)*(x1 - x2) + (y1 - y2)*(y1 - y2));
}
//////////////////////////////////////////////////
//////////////////////////////////////////////////
/* Quad Tree Traversal */
/* Histogram Calculator */
//////////////////////////////////////////////////
//////////////////////////////////////////////////
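// Strategy: each thread owns one point (tid indexes the leaf-packed d_POINT_* arrays).
// It first pairs its point with every later point inside the same leaf
// (pBuffer .. pBuffer + total - 1), then with every point stored in leaves whose node
// id is larger, so each unordered pair is counted exactly once. Distances are binned
// into a per-block shared-memory histogram (SHMOut, zeroed and flushed by thread 0 of
// the block) and merged into the global histogram with atomics.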
__global__ void CUDA_Calculate_Histogram(bucket *histogram_cuda, NODE *d_NODE, int num_of_nodes, float *d_POINT_x, float *d_POINT_y, NODEID *d_POINT_nodeid, tree_path *tree, int bucket_num)
{
const unsigned long long int tid = threadIdx.x + (blockIdx.x*blockDim.x);
if (tid < PDH_acnt_CUDA)
{
NODEID no = d_POINT_nodeid[tid]; // node id
register double x = d_POINT_x[tid]; // x coordinate
register double y = d_POINT_y[tid]; // y coordinate
NODE node = d_NODE[no]; // sets node
double distance = 0;
int h_pos = 0;
int *SHMOut = (int *)sharedMemory;
int i = 0;
int j = 0;
if(threadIdx.x == 0)
for (; i < bucket_num; i++) SHMOut[i] = 0;
__syncthreads();
for (i = tid - node.pBuffer + 1; i < node.total; i++) // scans through current node, finds point's index and calculates histogram for all points of a higher index
{
distance = CUDA_distance_calculator(x, y, d_POINT_x[node.pBuffer + i], d_POINT_y[node.pBuffer + i]);
h_pos = (int)(distance / PDH_res_CUDA);
atomicAdd(&SHMOut[h_pos], 1);
}
for (i = no + 1; i < num_of_nodes; i++) // scans through all nodes greater than current node
{
node = d_NODE[i];
for (j = 0; j < node.total; j++) // calculates histogram for all other points
{
distance = CUDA_distance_calculator(x, y, d_POINT_x[node.pBuffer + j], d_POINT_y[node.pBuffer + j]);
h_pos = (int)(distance / PDH_res_CUDA);
atomicAdd(&SHMOut[h_pos], 1);
}
}
__syncthreads();
if (threadIdx.x == 0)
{
for (i = 0; i < bucket_num; i++)
{
atomicAdd((unsigned long long int*) &histogram_cuda[i].d_cnt, (unsigned long long int) SHMOut[i]);
}
}
}
}
//////////////////////////////////////////////////
//////////////////////////////////////////////////
/* Quad Tree Kernel */
/* Sets up and Launches the CUDA kernel */
//////////////////////////////////////////////////
//////////////////////////////////////////////////
void Quad_Tree_Traversal(int num_buckets, int grid, int threads, int gpu_nodes, tree_path *tree)
{
bucket *cuda_histogram = NULL; /* Mallocs histogram in GPU */
hipMalloc((void **)&cuda_histogram, num_buckets * sizeof(bucket));
hipMemcpy(cuda_histogram, GPU_histogram, num_buckets * sizeof(bucket), hipMemcpyHostToDevice);
hipMemcpyToSymbol(PDH_acnt_CUDA, &PDH_acnt, sizeof(signed long long)); // constant memory atom size
hipMemcpyToSymbol(PDH_res_CUDA, &PDH_res, sizeof(double)); // constant memory width sizes
float elapsedTime = 0;
hipEvent_t start_time, stop_time;
hipEventCreate(&start_time);
hipEventCreate(&stop_time);
hipEventRecord(start_time, 0);
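/* dynamic shared memory: one int per histogram bucket backs the per-block private histogram */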
hipLaunchKernelGGL(( CUDA_Calculate_Histogram) , dim3(grid), dim3(threads),num_buckets * sizeof(int), 0, cuda_histogram, d_NODE, gpu_nodes, d_POINT_x, d_POINT_y, d_POINT_nodeid, tree, num_buckets);
hipEventRecord(stop_time, 0);
hipEventSynchronize(stop_time);
hipEventElapsedTime(&elapsedTime, start_time, stop_time);
hipEventDestroy(start_time);
hipEventDestroy(stop_time);
hipMemcpy(GPU_histogram, cuda_histogram, num_buckets * sizeof(bucket), hipMemcpyDeviceToHost);
hipFree(cuda_histogram);
printf("\nCUDA Kernel results:\n");
printf("Time to generate: %0.5f ms\n\n", elapsedTime);
output_histogram(GPU_histogram);
printf("\n");
histogram_comparison(CPU_histogram, GPU_histogram);
free(GPU_histogram);
}
/////////////////////////
/////////////////////////
/* End Quad Tree traversal */
/////////////////////////
/////////////////////////
/**<************************ Main function ***************************/
/**
* Two techniques to build QuadTrees
* 1- full : extend all the way down, only leaves hold points
* : counts are kept at intermediate levels
* : nulls are still used to know where points are.
* 2- adaptive : items are pushed around as needed to form tree
* : points of LIMIT pushed down.
** ******************************************************************/
int main(int argc, char **argv) {
if (argc < 4)
{
printf("you should insert the number of points, mmaximum number of points alowed in each node, and maximum number of levels alowed in the tree to the program to run\n");
return 1;
}
//number of points in the tree
unsigned long long int numberOfthreads = atoi(argv[1]);
numPoints = numberOfthreads;
//maximum number of points allowed in each node
bucketSize = atoi(argv[2]);
//maximum number of levels allowed in the tree
numLevels = atoi(argv[3]);
//maximum number of possible nodes based on the numLevels
maxNodes = ((pow(4, numLevels)) - 1) / 3;
printf("maxNodes is:%i \n", maxNodes);
PDH_res = 500;
// unsigned long long int numberOfthreads = numPoints;
hipError_t err = hipSetDevice(0);
float *h_POINT_x = (float *)malloc(numPoints * sizeof(float));
float *h_POINT_y = (float *)malloc(numPoints * sizeof(float));
int *h_POINT_id = (int *)malloc(numPoints * sizeof(int));
memset(h_POINT_x, 0, numPoints * sizeof(float));
memset(h_POINT_y, 0, numPoints * sizeof(float));
memset(h_POINT_id, 0, numPoints * sizeof(int));
atom_list = (atom *)malloc(sizeof(atom)*(numPoints));
long q;
srand(time(NULL));
//srand48(4);
for (q = 0; q<numPoints; q++) {
h_POINT_id[q] = q;
float x = ((float)(rand()) / RAND_MAX) * RANGE;
float y = ((float)(rand()) / RAND_MAX) * RANGE;
h_POINT_x[q] = x;
h_POINT_y[q] = y;
atom_list[q].x_pos = (double)x;
atom_list[q].y_pos = (double)y;
}
printf("start main \n");
//NODEID rootNode;
// Get memory for root node.
// Start node : root
//setNode(rootNode, 0, 0, rangeSize, rangeSize, TYPE_ROOT, 0, -1);
// Create the quadtree.
//srand48(4);
// Preallocate memory for all objects in CPU.
hipMalloc((void**)&d_node_counter, sizeof(int)*maxNodes);
gpuErrchk(hipPeekAtLastError());
hipMalloc((void**)&d_split_node, sizeof(int)*maxNodes);
gpuErrchk(hipPeekAtLastError());
hipMalloc((void**)&d_node_id, sizeof(int)*maxNodes);
gpuErrchk(hipPeekAtLastError());
hipMalloc((void**)&d_level, sizeof(int)*maxNodes);
hipMalloc((void**)&d_positions, sizeof(float2)*maxNodes);
hipMalloc((void**)&d_query_POINT_x, sizeof(float)*numPoints);
hipMalloc((void**)&d_query_POINT_y, sizeof(float)*numPoints);
hipMalloc((void**)&d_point_node, sizeof(int)*numPoints);
hipMalloc((void**)&d_query_POINT_id, sizeof(int)*numPoints);
gpuErrchk(hipPeekAtLastError());
hipMemset(d_node_counter, 0, sizeof(int)*maxNodes);
hipMemset(d_split_node, 0, sizeof(int)*maxNodes);
hipMemset(d_node_id, 0, sizeof(int)*maxNodes);
hipMemset(d_level, 0, sizeof(int)*maxNodes);
hipMemset(d_query_POINT_x, 0, sizeof(float)*numPoints);
hipMemset(d_query_POINT_y, 0, sizeof(float)*numPoints);
hipMemset(d_point_node, 0, sizeof(int)*numPoints);
hipMemset(d_query_POINT_id, 0, sizeof(int)*numPoints);
hipMemset(d_positions, 0, sizeof(float2)*maxNodes);
gpuErrchk(hipPeekAtLastError());
// hipMemcpyToSymbol(bucket_size, &bucketSize, sizeof(int), 0, hipMemcpyHostToDevice);
hipMemcpyToSymbol(bucket_size, &bucketSize, sizeof(int));
gpuErrchk(hipPeekAtLastError());
hipMemcpy(d_query_POINT_x, h_POINT_x, sizeof(float)*numPoints, hipMemcpyHostToDevice);
hipMemcpy(d_query_POINT_y, h_POINT_y, sizeof(float)*numPoints, hipMemcpyHostToDevice);
hipMemcpy(d_query_POINT_id, h_POINT_id, sizeof(int)*numPoints, hipMemcpyHostToDevice);
gpuErrchk(hipPeekAtLastError());
//thrust
thrust::device_ptr<int> dev_ptr(d_node_counter);
thrust::device_ptr<int> dev_split(d_split_node);
/* start counting time */
hipEvent_t start, stop, start1, stop1;
//run the simulation
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventCreate(&start1);
hipEventCreate(&stop1);
// err = hipMemPrefetchAsync(d_query_POINT_x, sizeof(float)*numPoints, 0);
// err = hipMemPrefetchAsync(d_query_POINT_y, sizeof(float)*numPoints, 0);
// err = hipMemPrefetchAsync(d_query_POINT_id, sizeof(int)*numPoints, 0);
// err = hipMemPrefetchAsync(d_node_counter, sizeof(int)*maxNodes , 0);
// err = hipMemPrefetchAsync(d_split_node, sizeof(int)*maxNodes , 0);
// err = hipMemPrefetchAsync(d_node_id, sizeof(int)*maxNodes , 0);
// err = hipMemPrefetchAsync(d_level, sizeof(int)*maxNodes , 0);
// err = hipMemPrefetchAsync(d_positions, sizeof(float2)*maxNodes , 0);
// err = hipMemPrefetchAsync(d_point_node, sizeof(int)*numPoints , 0);
gpuErrchk(hipPeekAtLastError());
float elapsedTime;
float totalBuildingTime = 0.0;
//int blocks_num = 2048000 / BLOCK_SIZE;
int blocks_num;
if (numPoints % BLOCK_SIZE == 0) {
blocks_num = numPoints / BLOCK_SIZE;
}
else {
blocks_num = numPoints / BLOCK_SIZE + 1;
}
int cuda_block_num;
if (numPoints % CUDA_BLOCK_SIZE == 0) {
cuda_block_num = numPoints / CUDA_BLOCK_SIZE;
}
else {
cuda_block_num = numPoints / CUDA_BLOCK_SIZE + 1;
}
printf("block num is: %i and cuda block num is:%i\n", blocks_num, cuda_block_num);
printf("BLOCK_SIZE is: %i \n", BLOCK_SIZE);
dim3 grid(blocks_num, 1, 1);
dim3 threads(BLOCK_SIZE, 1, 1);
printf("before calling the first kernel\n");
// printf("data point in gpu is %p \n", d_POINT+0);
//todo check the seed
//todo comment
//setup_kernel << <1, BLOCK_SIZE >> >(state, unsigned(time(NULL)) +1);
// gpuErrchk(hipDeviceSynchronize());
// Size of quadrant
float sqrange = RANGE;
printf("sqrange is: %f \n", sqrange);
// call the setNode 0
dim3 grid0(1, 1, 1);
dim3 threads0(1, 1, 1);
hipEventRecord(start1, 0);
setRootNodeKernel << <grid0, threads0 >> > (0.0, 0.0, d_node_counter, d_split_node, d_level, d_positions, numPoints);
hipEventRecord(stop1, 0);
hipEventSynchronize(stop1);
hipEventElapsedTime(&elapsedTime, start1, stop1);
printf("******** Total Running Time of creating root= %0.5f ms \n", elapsedTime);
totalBuildingTime = totalBuildingTime + elapsedTime;
gpuErrchk(hipDeviceSynchronize());
int level = 0;
int max_level = 10;
bool flag = true;
// int startLevelNode = pow (4, level);
// int endtLevelNode = pow (4, (level+1));
int startLevelNode = 1;
int endtLevelNode = 4;
int split = 0;
float width = float(RANGE) / 2.00;
float height = float(RANGE) / 2.00;
//for new function
int numberOfActiveThreads = 0;
int zeroCount_block_num;
dim3 threadsz(CUDA_BLOCK_SIZE, 1, 1);
int previousSplit = 0;
hipEventRecord(start1, 0);
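/* level-by-level build: each pass pushes every point one level down, splitting any child
whose running count passes bucketSize; the loop stops when no active node at the current
level was split or when max_level is reached */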
while (level < max_level && flag == true) {
countThePointsInPositions << <grid, threads >> > (width, height, level + 1, d_query_POINT_x, d_query_POINT_y, d_node_counter, d_split_node, d_level, numPoints, cuda_block_num, d_positions, d_point_node);
gpuErrchk(hipDeviceSynchronize());
numberOfActiveThreads = endtLevelNode - startLevelNode + 1;
//printf("number of active threads is: %i \n ", numberOfActiveThreads);
if (numberOfActiveThreads % CUDA_BLOCK_SIZE == 0) {
zeroCount_block_num = numberOfActiveThreads / CUDA_BLOCK_SIZE;
}
else {
zeroCount_block_num = numberOfActiveThreads / CUDA_BLOCK_SIZE + 1;
}
dim3 gridz(zeroCount_block_num, 1, 1);
// hipMemcpyToSymbol("d_zeros", &h_zeros, sizeof(int));
countTheNumberOfZeros << <gridz, threadsz >> > (d_split_node, startLevelNode, numberOfActiveThreads);
gpuErrchk(hipDeviceSynchronize());
hipMemcpyFromSymbol(&split, d_zeros, sizeof(unsigned int), 0, hipMemcpyDeviceToHost);
split = split - previousSplit;
// printf("level is: %i, start is:%i, end is: %i , split:%i \n", level, startLevelNode,endtLevelNode, split );
if (split == endtLevelNode - startLevelNode + 1) {
flag = false;
}
else {
flag = true;
width = width / 2.00;
height = height / 2.00;
level = level + 1;
startLevelNode = startLevelNode + pow(4, level);
endtLevelNode = startLevelNode + pow(4, (level + 1)) - 1;
previousSplit = previousSplit + split;
}
}
hipEventRecord(stop1, 0);
hipEventSynchronize(stop1);
printf("after calling the kernel\n");
hipEventElapsedTime(&elapsedTime, start1, stop1);
printf("******** Total Running Time of inserting positions is = %0.5f ms \n", elapsedTime);
totalBuildingTime = totalBuildingTime + elapsedTime;
printf("level after kernel is: %i \n", level);
int node_blocks_num;
int NODE_BLOCK_SIZE = 32;
if (maxNodes % NODE_BLOCK_SIZE == 0) {
node_blocks_num = maxNodes / NODE_BLOCK_SIZE;
}
else {
node_blocks_num = maxNodes / NODE_BLOCK_SIZE + 1;
}
printf("maxNode is: %i, node block num is: %i \n", maxNodes, node_blocks_num);
dim3 grid1(node_blocks_num, 1, 1);
dim3 threads1(NODE_BLOCK_SIZE, 1, 1);
hipEventRecord(start, 0);
countTheOnesInCounterArray << <grid1, threads1 >> > (d_node_counter, maxNodes);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
gpuErrchk(hipDeviceSynchronize());
hipEventElapsedTime(&elapsedTime, start, stop);
printf("******** cont the number of ones in counter array = %0.5f ms \n", elapsedTime);
hipMemcpyFromSymbol(&h_counter_one, d_counter_one, sizeof(unsigned int), 0, hipMemcpyDeviceToHost);
printf("ones are: %i non empty nodes \n", h_counter_one);
int gpu_nodes = h_counter_one;
totalBuildingTime = totalBuildingTime + elapsedTime;
hipEventRecord(start, 0);
countTheNonLeafNodes << <grid1, threads1 >> > (d_split_node, maxNodes);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
gpuErrchk(hipDeviceSynchronize());
hipEventElapsedTime(&elapsedTime, start, stop);
printf("******** cont the number of zeros in split array = %0.5f ms \n", elapsedTime);
hipMemcpyFromSymbol(&h_split_array_zero, d_split_array_zero, sizeof(unsigned int), 0, hipMemcpyDeviceToHost);
printf("h_split_zero:%i and non_split_zero :%i and leaves: %i\n", h_split_array_zero, (maxNodes - h_split_array_zero), (gpu_nodes - (maxNodes - h_split_array_zero)));
totalBuildingTime = totalBuildingTime + elapsedTime;
split = h_split_array_zero;
int non_split_zero = maxNodes - split;
//total number of leaves
int leaves = gpu_nodes - non_split_zero;
printf("number os split is: %i and number of leaves is: %i\n", non_split_zero, leaves);
// int sum = 0;
// sum= thrust::count(thrust::device, dev_ptr, (dev_ptr + maxNodes) , 0);
// //total number of nodes
// int gpu_nodes= maxNodes - sum ;
// printf("zero is:%i and nodes is: %i \n", sum, gpu_nodes);
//
// split= thrust::count(thrust::device, dev_split, (dev_split + maxNodes) , 0);
// //non leaf nodes
// int non_split_zero= maxNodes - split;
// //total number of leaves
// int leaves = gpu_nodes - non_split_zero;
// printf("number os split is: %i and number of leaves is: %i\n", non_split_zero, leaves);
// int numberOfNodes;
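/* the implicit per-level arrays are now compacted: allocate one NODE record per non-empty
slot and one contiguous point buffer shared by all leaves */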
hipMalloc((void**)&d_NODE, sizeof(NODE)*gpu_nodes);
hipMemset(d_NODE, 0, sizeof(NODE)*gpu_nodes);
hipMalloc((void**)&d_POINT_x, sizeof(float)*numPoints);
hipMalloc((void**)&d_POINT_y, sizeof(float)*numPoints);
hipMalloc((void**)&d_POINT_nodeid, sizeof(NODEID)*numPoints);
hipMemset(d_POINT_nodeid, 0, sizeof(NODEID)*numPoints);
hipMemset(d_POINT_x, 0, sizeof(float)*numPoints);
hipMemset(d_POINT_y, 0, sizeof(float)*numPoints);
hipMalloc((void**)&d_POINT_id, sizeof(int)*numPoints);
hipMemset(d_POINT_id, 0, sizeof(int)*numPoints);
hipMalloc((void**)&d_leave_list, sizeof(int)*leaves);
hipMemset(d_leave_list, 0, sizeof(int)*leaves);
hipMalloc((void**)&leaf_m_address, sizeof(unsigned long long int));
hipMemset(leaf_m_address, 0, sizeof(unsigned long long int));
gpuErrchk(hipPeekAtLastError());
//create root Node
hipEventRecord(start, 0);
createRootNodeKernel << <grid0, threads0 >> > (0.0, 0.0, sqrange, sqrange, d_NODE, d_node_id);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
gpuErrchk(hipDeviceSynchronize());
hipEventElapsedTime(&elapsedTime, start, stop);
printf("******** Total Running Time of creating root node = %0.5f ms \n", elapsedTime);
totalBuildingTime = totalBuildingTime + elapsedTime;
//create nodes and allocate memory for that
hipEventRecord(start, 0);
createParentNodesKernel << <grid1, threads1 >> > (0.0, 0.0, sqrange, sqrange, d_NODE, d_node_counter, d_split_node, maxNodes, d_node_id, d_level, d_positions);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
gpuErrchk(hipDeviceSynchronize());
hipEventElapsedTime(&elapsedTime, start, stop);
printf("******** Total Running Time of setting node kernel = %0.5f ms \n", elapsedTime);
totalBuildingTime = totalBuildingTime + elapsedTime;
hipMemcpyFromSymbol(&h_node_allocate, d_node_allocate, sizeof(unsigned int), 0, hipMemcpyDeviceToHost);
printf("number of allocated nodes is: %i \n", h_node_allocate);
// err = hipMemPrefetchAsync(d_NODE, sizeof(NODE)*gpu_nodes , 0);
// err = hipMemPrefetchAsync(d_leave_list, sizeof(int)*leaves , 0);
//set the links between children and parents
hipEventRecord(start, 0);
finalNodesSetUpKernel << <grid1, threads1 >> > (d_NODE, d_node_counter, d_split_node, maxNodes, d_node_id, d_leave_list, leaf_m_address);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
gpuErrchk(hipDeviceSynchronize());
hipEventElapsedTime(&elapsedTime, start, stop);
printf("******** Total Running Time of final node kernel = %0.5f ms \n", elapsedTime);
totalBuildingTime = totalBuildingTime + elapsedTime;
hipMemcpyFromSymbol(&h_point_allocate, d_point_allocate, sizeof(unsigned int), 0, hipMemcpyDeviceToHost);
printf("number of allocated points is: %i number of point is:%i\n", h_point_allocate, numPoints);
int insert_block_num;
if (numPoints % INSERT_BLOCK_SIZE == 0) {
insert_block_num = numPoints / INSERT_BLOCK_SIZE;
}
else {
insert_block_num = numPoints / INSERT_BLOCK_SIZE + 1;
}
dim3 grid3(insert_block_num, 1, 1);
dim3 threads3(INSERT_BLOCK_SIZE, 1, 1);
// err = hipMemPrefetchAsync(d_POINT_x, sizeof(float)*numPoints , 0);
// err = hipMemPrefetchAsync(d_POINT_y, sizeof(float)*numPoints , 0);
// err = hipMemPrefetchAsync(d_POINT_id, sizeof(int)*numPoints , 0);
//insert into leaf nodes
hipEventRecord(start, 0);
insertIntoLeafNodes << <grid3, threads3 >> > (d_node_id, d_query_POINT_x, d_query_POINT_y, d_query_POINT_id, d_NODE, d_POINT_x, d_POINT_y, d_POINT_id, d_point_node, numPoints, d_POINT_nodeid);
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
gpuErrchk(hipDeviceSynchronize());
hipEventElapsedTime(&elapsedTime, start, stop);
printf("******** Total Running Time of inserting to leaves = %0.5f ms \n", elapsedTime);
totalBuildingTime = totalBuildingTime + elapsedTime;
printf("********** total tree construction time = %0.5f ms \n", totalBuildingTime);
/////////////////////////////////////////////////
// sets up the CPU histogram //
/////////////////////////////////////////////////
PDH_acnt = numPoints;
if (argc > 4)
{
PDH_res = atof(argv[4]);
}
num_buckets = (int)(RANGE * 1.732 / PDH_res) + 1;
CPU_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
GPU_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
/* sets CPU and GPU histograms to zero */
int z = 0;
for (z = 0; z < num_buckets; z++)
{
CPU_histogram[z].d_cnt = 0;
GPU_histogram[z].d_cnt = 0;
}
/*
for (z = 0; z < numPoints; z++)
{
printf("\n(%f,%f) %i", atom_list[z].x_pos, atom_list[z].y_pos, z);
}
*/
gettimeofday(&startTime, &Idunno);
PDH_baseline();
report_running_time();
printf("\nCPU results:\n");
output_histogram(CPU_histogram);
/////////////////////////////////////////////////
// end of the CPU histogram //
/////////////////////////////////////////////////
hipFree(d_level);
hipFree(d_node_counter);
hipFree(d_split_node);
hipFree(d_positions);
hipFree(d_query_POINT_x);
hipFree(d_query_POINT_y);
hipFree(d_query_POINT_id);
hipFree(d_leave_list);
hipFree(leaf_m_address);
hipFree(d_node_id);
hipFree(d_point_node);
hipFree(d_POINT_id);
hipEventDestroy(start);
hipEventDestroy(stop);
hipEventDestroy(start1);
hipEventDestroy(stop1);
free(h_POINT_x);
free(h_POINT_y);
free(h_POINT_id);
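/* flatten the child links into tree_path records, then run the SDH kernel over the tree */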
tree_path *tree;
hipMalloc((void**)&tree, sizeof(tree_path)*gpu_nodes);
CUDA_RebuildTree << <1, 1, 1 >> >(d_NODE, gpu_nodes, tree);
int cuda_block_size = 128;
if (argc > 5)
{
cuda_block_size = atoi(argv[5]);
}
int cuda_block_number = ceil(PDH_acnt / cuda_block_size) + 1;
Quad_Tree_Traversal(num_buckets, cuda_block_number, cuda_block_size, gpu_nodes, tree);
free(CPU_histogram);
free(atom_list);
hipFree(d_NODE);
hipFree(d_POINT_x);
hipFree(d_POINT_y);
hipFree(d_POINT_nodeid);
hipFree(tree);
return 0;
} | a4d809048f741f8b1c36759b5f8f606a6ed6b6b2.cu | /*
Project 3
Summer 2018
Brian Pinson
Karshan Arjun
Mark Tushemereiwe
*/
/** *******************************************************************
* File name : quadtreeGPU.cu
* Constructs the quadtree on the GPU; this is the version with all edited functions.
*
** *******************************************************************/
/**<************************# Includes ********************************/
#include<stdio.h>
#include<stdlib.h>
#include"MemoryManager.h"
#include<unistd.h>
#include<sys/time.h>
#include <stdbool.h>
#include<stdlib.h>
#include<cstdlib>
#include <cuda.h>
#include <math.h>
#include <curand.h>
#include <curand_kernel.h>
#include<time.h>
#include<string.h>
#include <iostream>
#include <cmath>
#include <limits>
#include <float.h>
#include <thrust/count.h>
#include <thrust/device_vector.h>
#include <thrust/execution_policy.h>
#ifdef __CDT_PARSER__
/**<************************# Defines *********************************/
#define __host__
#define __shared__
#define CUDA_KERNEL_DIM(...)
#else
#define CUDA_KERNEL_DIM(...) <<< __VA_ARGS__ >>>
#endif
#define BUILD_FULL 1
#define BUILD_ADAPTIVE 2
#define MODE_RANDOM 1
#define MODE_FILE 2
#define TRUE 1
#define FALSE 0
#define pMax 32
#ifndef RANGE
#define RANGE 24000
//#define RANGE 1024
#endif
#define BLOCK_SIZE 1024
#define CUDA_BLOCK_SIZE 64
#define STACK_MAX 36
#define BUFFER_SIZE 1024
#define Leaf_SIZE 1024
#define INSERT_BLOCK_SIZE 1024
#define PAGE_SIZE 40
#define NB_PAGE_SIZE 50
#define LEAF_BUFFER_SIZE 1024
#define MAX_LEAF_CAPACITY 5120
__device__ __constant__ int bucket_size;
__device__ __constant__ int max_levels = 10;
__constant__ long long PDH_acnt_CUDA; // constant memory number of points
__constant__ double PDH_res_CUDA; // constant memory width size
extern __shared__ double sharedMemory[]; // shared memory to contain points
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true)
{
if (code != cudaSuccess)
{
fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
//typedef int POINTID;
//typedef int NODEID;
//typedef int BUFFID;
/* These are for an old way of tracking time */
struct timezone Idunno;
struct timeval startTime, endTime;
/* helps keep track of tree child nodes*/
struct tree_path
{
NODEID child[4];
};
/*
int numLevels = 10;
int maxNodes=349525;
int maxLeaves=262144;
int maxLeafParent=65536;
//level 8
int maxNodes=21845;
int maxLeaves=16384;
int maxLeafParent=4096;
*/
/**<***************** Global variables ****************************/
int pointMode = MODE_RANDOM;
char *inputPointFileName;
char *outputTreeFileName;
int rangeSize = RANGE;
//int bucketSize = 512;
int bucketSize = 1024;
//int numPoints = 8192000;
//int numPoints = 409600;
int numPoints = 16384000;
int numLevels = 10;
int maxNodes = 349525;
int maxLeaves = 262144;
int maxLeafParent = 65536;
int numSearches = 10;
int printTree = 1;
int outputTree = 0;
int quadTreeMode = BUILD_FULL;
//int quadTreeMode = BUILD_ADAPTIVE;
//int numPolygon = 1099120;
int pointRangeX = RANGE;
int pointRangeY = RANGE;
int completeIndex = 0;
int NotIndex = 0;
int PartialIndex = 0;
int arraysize = 100;
int globalLevel = 0;
int globalpoint = 0;
/**<***************** enums ******************************/
//enum {
// TYPE_NONE = 0, TYPE_ROOT, TYPE_LINK, TYPE_LEAF, TYPE_INV
//};
//
//enum {
// FullyOverlapped = 0, PartiallyOverlapped
//};
//for tree construction
int *d_node_counter;
int * d_split_node;
int * d_node_id;
int * d_level;
int* d_point_node;
__device__ unsigned int d_node_allocate = 0;
__device__ unsigned int d_point_allocate = 0;
//define constant
//__device__ unsigned int d_max_level= 0;
unsigned int h_node_allocate = 0;
unsigned int h_point_allocate = 0;
struct buffer {
//int id;
int leafId;
int numberOfQueries;
unsigned long int queries[BUFFER_SIZE];
};
typedef struct LEAF_BUFFER {
// Array of points
unsigned long int queryList[LEAF_BUFFER_SIZE];
//unsigned int querytCount;
//unsigned long int nextBufferId;
} LEAF_BUFFER;
struct Output {
unsigned long long int offset[7];
int page_num;
}Output;
float *d_query_POINT_x;
float *d_query_POINT_y;
int *d_query_POINT_id;
float2 *d_positions;
unsigned long long int *leaf_m_address;
int* d_POINT_nodeid;
//for output
struct Output *d_output;
struct Output *d_output_nonBuffer;
struct Output *h_output;
__device__ unsigned int d_leaves_allocate = 0;
__device__ unsigned int d_leaf_blocks = 0;
int* d_leave_list;
__device__ int d_zeros = 0;
unsigned int h_zeros = 0;
//for saving the intersecting leaves
int *d_intersecting_leave_nodes; //save intersecting leave nodes
int *d_intersecting_leave_count; //count the intersection
__device__ int d_counter_one = 0;
unsigned int h_counter_one = 0;
__device__ int d_split_array_zero = 0;
unsigned int h_split_array_zero = 0;
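// seeds the implicit tree arrays: slot 0 (the root) starts out holding every point and is marked as split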
__global__ void setRootNodeKernel(float xPos, float yPos, int *d_node_counter, int *d_split_node, int *d_level, float2 *d_positions, int numberOfPoints) {
d_node_counter[0] = numberOfPoints;
d_split_node[0] = 1;
d_positions[0].x = xPos;
d_positions[0].y = yPos;
d_level[0] = 0;
}
//get direction
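// maps a point (x, y) to one of the four equal child quadrants of the node anchored at
// (posX, posY): 0 = NW, 1 = NE, 2 = SW, 3 = SE; returns -1 if the point lies outside the node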
__device__ int getNodeDirection(float posX, float posY, float width, float height, float x, float y) {
if ((x >= posX) && (x < posX + width) && (y >= posY + height)
&& (y < posY + height + height)) {
return 0;
}
else if ((x >= posX + width) && (x < posX + width + width) && (y >= posY + height)
&& (y < posY + height + height)) {
return 1;
}
else if ((x >= posX) && (x < posX + width) && (y >= posY)
&& (y < posY + height)) {
return 2;
}
else if ((x >= posX + width) && (x < posX + width + width) && (y >= posY)
&& (y < posY + height)) {
return 3;
}
else {
return -1;
}
}
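// one thread per point: if the point's current node has been split, descend one level,
// atomically bump the child's counter, and when a child's count passes bucket_size mark
// it as split and record its origin so the next pass can descend further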
__global__ void countThePointsInPositions(float width, float height, int level, float* d_queries_x, float* d_queries_y, int *d_node_counter, int *d_split_node, int *d_level, int numberOfthreads, int blocks_num, float2 *d_positions, int *d_point_node) {
const unsigned long long int tid = threadIdx.x + (blockIdx.x*blockDim.x);
if (tid < numberOfthreads) {
register float x = d_queries_x[tid];
register float y = d_queries_y[tid];
register int myCount = 0;
register int direction = -1;
register int node_Id = d_point_node[tid];
register float posX = d_positions[node_Id].x;
register float posY = d_positions[node_Id].y;
register int mem_position;
if (d_split_node[node_Id] == 1) {
direction = getNodeDirection(posX, posY, width, height, x, y);
if (direction != -1) {
mem_position = (((node_Id * 4) + direction) + 1);
d_point_node[tid] = mem_position;
// if (tid ==0){
// printf("x:%f, y: %f , direction:%i, node_id:%i, dir:%i , xpos:%f, ypos:%f \n", x, y, direction, node_Id, mem_position, posX, posY);
// }
if ((d_split_node[mem_position] == 0 || (level == max_levels))) {
//&& d_split_node[mem_position]==0
myCount = atomicAdd(&d_node_counter[mem_position], 1);
if (myCount == bucket_size && (level < max_levels)) {
d_split_node[mem_position] = 1;
d_level[mem_position] = level;
// float width = pWidth / 2.00;
// float height = pHeight / 2.00;
//
switch (direction) {
case 0: // NW
posX = posX;
posY = posY + height;
d_positions[mem_position].x = posX;
d_positions[mem_position].y = posY;
break;
case 1: // NE
posX = posX + width;
posY = posY + height;
d_positions[mem_position].x = posX;
d_positions[mem_position].y = posY;
break;
case 2: // SW
posX = posX;
posY = posY;
d_positions[mem_position].x = posX;
d_positions[mem_position].y = posY;
break;
case 3: // SE
posX = posX + width;
posY = posY;
d_positions[mem_position].x = posX;
d_positions[mem_position].y = posY;
break;
}
// printf("tid: %li, node id:%i, xpos:%f, ypos:%f, dplit:%i\n", tid, mem_position, posX, posY, d_split_node[mem_position]);
}
}
}
}
}
__syncthreads();
}
__device__ inline void device_setNode(NODEID nodeid, float x, float y, float w, float h, int type, int level, int parentIndex, NODE* d_NODE, int open) {
// Initialize the node record at nodeid: geometry, level, type, and bookkeeping fields.
d_NODE[nodeid].index = nodeid;
d_NODE[nodeid].posX = x;
d_NODE[nodeid].posY = y;
d_NODE[nodeid].width = w;
d_NODE[nodeid].height = h;
d_NODE[nodeid].level = level;
// Reset all of the tracking values.
int i;
for (i = 0; i < 4; i++)
{
d_NODE[nodeid].child[i] = -1;
//node->count[i] = 0;
}
d_NODE[nodeid].total = 0;
//node->index = 0;
//node->offset = 0;
d_NODE[nodeid].open = open;
d_NODE[nodeid].type = type;
d_NODE[nodeid].pBuffer = -1;
d_NODE[nodeid].parent_index = parentIndex;
d_NODE[nodeid].leafBufferStart = -1;
d_NODE[nodeid].totalRegisterQuery = 0;
//d_NODE[nodeid].newCount=0;
}
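// recovers a slot's quadrant inside its parent from its position in the implicit array
// (children of slot p live at 4p+1..4p+4): returns 0 = NW, 1 = NE, 2 = SW, 3 = SE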
__device__ inline int getDirection(unsigned long long int tid) {
int direction = (tid % 4);
int actualDirection;
switch (direction) {
case 0:
//child SE dir =3
actualDirection = 3;
break;
case 1:
//child NW dir =0
actualDirection = 0;
break;
case 2:
//child NE dir=1
actualDirection = 1;
break;
case 3:
//child SW dir =2
actualDirection = 2;
break;
}
return actualDirection;
}
__global__ void createRootNodeKernel(float posX, float posY, float pWidth, float pHeight, struct NODE* d_NODE, int *d_node_id) {
register int myindex = 0;
myindex = atomicAdd(&d_node_allocate, 1);
d_node_id[0] = myindex;
device_setNode(myindex, posX, posY, pWidth, pHeight, TYPE_ROOT, 0, -1, d_NODE, false);
}
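// one thread per implicit slot: every non-empty slot gets a compact NODE record; split
// slots become link nodes at their recorded origin, unsplit slots become leaves whose
// origin and size are derived from the parent's quadrant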
__global__ void createParentNodesKernel(float posX, float posY, float pWidth, float pHeight, struct NODE* d_NODE, int *d_node_counter, int *d_split_node, int maxNodes, int *d_node_id, int *d_level, float2 *d_positions) {
const unsigned long long int tid = threadIdx.x + (blockIdx.x*blockDim.x);
if (tid < maxNodes && d_node_counter[tid] != 0 && tid != 0) {
register int myindex = 0;
myindex = atomicAdd(&d_node_allocate, 1);
d_node_id[tid] = myindex;
// if (tid == 0){
//
// device_setNode(myindex, posX, posY, pWidth, pHeight, TYPE_ROOT, 0, 0, d_NODE, false);
// //printf("my index is:%i \n", myindex);
// }
// else {
register int direction = getDirection(tid);
register int parent;
parent = (tid - direction - 1) / 4;
register int level;
register float xPos;
register float yPos;
register int type;
register float width;
register float height;
register int open;
// register int total;
if (d_split_node[tid] == 1) {
//this is a link node
level = d_level[tid];
xPos = d_positions[tid].x;
yPos = d_positions[tid].y;
type = TYPE_LINK;
width = pWidth / (float)(pow((float)2, (float)level));
height = pHeight / (float)(pow((float)2, (float)level));
open = FALSE;
// total= d_node_counter[tid];
}
else {
//this is a leaf node
level = d_level[parent] + 1;
type = TYPE_LEAF;
xPos = d_positions[parent].x;
yPos = d_positions[parent].y;
width = pWidth / (float)(pow((float)2, (float)level));
height = pHeight / (float)(pow((float)2, (float)level));
open = TRUE;
// total =0;
switch (direction) {
case 0:
//child SE
xPos = xPos;
yPos = yPos + height;
break;
case 1:
//child NW
xPos = xPos + width;
yPos = yPos + height;
break;
case 2:
//child NE
xPos = xPos;
yPos = yPos;
break;
case 3:
//child SW
xPos = xPos + width;
yPos = yPos;
break;
}
}
// if (tid==1 ){
// printf("my index is:%i , direction is: %i , parent is:%i, total:%i, open:%i, level:%i, xpos:%f, ypos:%f, width:%f, height:%f, type:%i \n", myindex, direction , parent, d_node_counter[tid], open, level, xPos, yPos, width, height, type );
// }
device_setNode(myindex, xPos, yPos, width, height, type, level, parent, d_NODE, open);
// }
}
__syncthreads();
}
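// wires every compact node to its parent and, for each leaf, reserves a contiguous slice
// of the global point buffer (pBuffer), an entry in the leaf list, and a query-buffer offset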
//__global__ void finalNodesSetUpKernel( struct NODE* d_NODE, int *d_node_counter, int *d_split_node, int maxNodes, int *d_node_id, int *d_leave_list, unsigned long long int *Address, unsigned long long int *d_leaf_buffer_list){
__global__ void finalNodesSetUpKernel(struct NODE* d_NODE, int *d_node_counter, int *d_split_node, int maxNodes, int *d_node_id, int *d_leave_list, unsigned long long int *Address) {
const unsigned long long int tid = threadIdx.x + (blockIdx.x*blockDim.x);
if (tid < maxNodes) {
if (d_node_counter[tid] != 0 && tid != 0) {
register int nodeid = d_node_id[tid];
register int parentNodeId = d_node_id[d_NODE[nodeid].parent_index];
d_NODE[nodeid].parent_index = parentNodeId;
register int direction = getDirection(tid);
d_NODE[parentNodeId].child[direction] = nodeid;
if (d_split_node[tid] != 1) {
//this is a leaf
register int myindex = 0;
myindex = atomicAdd(&d_point_allocate, d_node_counter[tid]);
d_NODE[nodeid].pBuffer = myindex;
myindex = atomicAdd(&d_leaves_allocate, 1);
d_leave_list[myindex] = nodeid;
unsigned long long int offsetAddress = atomicAdd(Address, BUFFER_SIZE);
d_NODE[nodeid].leafBufferStart = offsetAddress;
d_NODE[nodeid].totalRegisterQuery = 0;
}
}
}
__syncthreads();
}
//__device__ NODEID findQuadTreeNodeCuda(NODEID nParentid, float x, float y, NODE* d_NODE, unsigned long long int tid ) {
__device__ NODEID findQuadTreeNodeCuda(NODEID nParentid, float x, float y, NODE* d_NODE) {
register float posX, posY;
register int index;
if (nParentid == -1)
return nParentid;
register NODE nParent = d_NODE[nParentid];
if (nParent.type == TYPE_LEAF)
return nParentid;
// Get the point.
// Child width and height
register float width;
register float height;
// if (tid ==0){
// printf("nparent is: %i , with:%f, height:%f, child0:%i, child1:%i, child2:%i, child3:%i\n", nParentid, nParent.width , nParent.height, nParent.child[0], nParent.child[1], nParent.child[2], nParent.child[3]);
// }
while (nParent.type != TYPE_LEAF) {
width = nParent.width / 2.00;
height = nParent.height / 2.00;
for (index = 0; index < 4; index++) {
switch (index) {
case 0: // NW
posX = nParent.posX;
posY = nParent.posY + height;
if ((x >= posX) && (x < posX + width) && (y >= posY)
&& (y < posY + height)) {
nParentid = nParent.child[0];
}
break;
case 1: // NE
posX = nParent.posX + width;
posY = nParent.posY + height;
if ((x >= posX) && (x < posX + width) && (y >= posY)
&& (y < posY + height)) {
nParentid = nParent.child[1];
}
break;
case 2: // SW
posX = nParent.posX;
posY = nParent.posY;
if ((x >= posX) && (x < posX + width) && (y >= posY)
&& (y < posY + height)) {
nParentid = nParent.child[2];
}
break;
case 3: // SE
posX = nParent.posX + width;
posY = nParent.posY;
if ((x >= posX) && (x < posX + width) && (y >= posY)
&& (y < posY + height)) {
nParentid = nParent.child[3];
}
break;
}
}
if (nParentid == -1)
return nParentid;
nParent = d_NODE[nParentid];
// if (tid ==0){
// printf("nparent is: %i \n", nParentid);
// }
}
return nParentid;
}
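// scatters each input point into its leaf's slice of the compact point arrays and records
// the owning leaf id per point for the later histogram traversal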
__global__ void insertIntoLeafNodes(int *d_node_id, float* d_query_POINT_x, float* d_query_POINT_y, int *d_query_POINT_id, NODE* d_NODE, float *d_POINT_x, float *d_POINT_y, int *d_POINT_id, int *d_point_node, int numPoints, NODEID *d_POINT_nodeid) {
const unsigned long long int tid = threadIdx.x + (blockIdx.x*blockDim.x);
if (tid < numPoints) {
register int myindex;
register NODEID leaf = d_node_id[d_point_node[tid]];
register float x = d_query_POINT_x[tid];
register float y = d_query_POINT_y[tid];
register int index = d_query_POINT_id[tid];
if (d_NODE[leaf].type == TYPE_LEAF) {
myindex = atomicAdd(&d_NODE[leaf].total, 1);
if ((myindex <bucket_size && d_NODE[leaf].pBuffer != -1) || (myindex >= bucket_size && d_NODE[leaf].level == max_levels) && d_NODE[leaf].pBuffer != -1) {
d_POINT_id[(d_NODE[leaf].pBuffer + myindex)] = index;
d_POINT_x[(d_NODE[leaf].pBuffer + myindex)] = x;
d_POINT_y[(d_NODE[leaf].pBuffer + myindex)] = y;
d_POINT_nodeid[(d_NODE[leaf].pBuffer + myindex)] = leaf;
}
}
}
__syncthreads();
}
/***************************************** end of building the tree ***************************/
/*
search on GPU
*/
// counts how many of the active nodes in the given range of the split array are still unsplit
__global__ void countTheNumberOfZeros(int *d_split_node, int startLevelNode, int numberOfActiveThreads) {
const unsigned long long int tid = threadIdx.x + (blockIdx.x*blockDim.x);
if (tid < numberOfActiveThreads) {
if (d_split_node[startLevelNode + tid] == 0) {
atomicAdd(&d_zeros, 1);
}
}
__syncthreads();
}
//count the number of non-empty nodes in the tree
__global__ void countTheOnesInCounterArray(int *d_node_counter, int maxNodes) {
const unsigned long long int tid = threadIdx.x + (blockIdx.x*blockDim.x);
if (tid < maxNodes) {
if (d_node_counter[tid] != 0) {
atomicAdd(&d_counter_one, 1);
}
}
__syncthreads();
}
//count the number of non link nodes in the tree
__global__ void countTheNonLeafNodes(int *d_split_node, int maxNodes) {
const unsigned long long int tid = threadIdx.x + (blockIdx.x*blockDim.x);
if (tid < maxNodes) {
if (d_split_node[tid] == 0) {
atomicAdd(&d_split_array_zero, 1);
}
}
__syncthreads();
}
/*
Rebuilds the Quadtree to make it work
*/
__global__ void CUDA_RebuildTree(NODE * d_NODE, int num_of_nodes, tree_path *tree)
{
int i = 0;
for (i = 0; i < num_of_nodes; i++)
{
int j = 0;
// printf("node %i", d_NODE[i].index);
for (j = 0; j < 4; j++)
{
tree[i].child[j] = d_NODE[i].child[j];
// printf(" child %i", tree[i].child[j]);
}
// printf(" parent %i\n", d_NODE[i].parent_index);
}
}
/*&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&
CPU Calculator
&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&&*/
/* descriptors for single atom in the tree */
typedef struct atomdesc
{
double x_pos;
double y_pos;
} atom;
typedef struct hist_entry
{
long long d_cnt; /* need a long long type as the count might be huge */
} bucket;
bucket * CPU_histogram; /* list of all buckets in the histogram */
bucket * GPU_histogram; /* list of all buckets in the histogram */
long long PDH_acnt; /* total number of data points */
int num_buckets; /* total number of buckets in the histogram */
double PDH_res; /* value of w */
atom * atom_list; /* list of all data points */
/* These are for an old way of tracking time */
/*
distance of two points in the atom_list
*/
double p2p_distance(int ind1, int ind2) {
double x1 = atom_list[ind1].x_pos;
double x2 = atom_list[ind2].x_pos;
double y1 = atom_list[ind1].y_pos;
double y2 = atom_list[ind2].y_pos;
return sqrt((x1 - x2)*(x1 - x2) + (y1 - y2)*(y1 - y2));
}
/*
brute-force SDH solution in a single CPU thread
*/
int PDH_baseline() {
int i, j, h_pos;
double dist;
for (i = 0; i < PDH_acnt; i++) {
for (j = i + 1; j < PDH_acnt; j++) {
dist = p2p_distance(i, j);
h_pos = (int)(dist / PDH_res);
CPU_histogram[h_pos].d_cnt++;
}
}
return 0;
}
/*
set a checkpoint and show the (natural) running time in seconds
*/
double report_running_time()
{
long sec_diff, usec_diff;
gettimeofday(&endTime, &Idunno);
sec_diff = endTime.tv_sec - startTime.tv_sec;
usec_diff = endTime.tv_usec - startTime.tv_usec;
if (usec_diff < 0) {
sec_diff--;
usec_diff += 1000000;
}
printf("\n\nRunning time for CPU version: %ld.%06ld\n", sec_diff, usec_diff);
return (double)(sec_diff*1.0 + usec_diff / 1000000.0);
}
/*
print the counts in all buckets of the histogram
*/
void output_histogram(bucket * input_histogram)
{
int i;
long long total_cnt = 0;
for (i = 0; i< num_buckets; i++) {
if (i % 5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", input_histogram[i].d_cnt);
total_cnt += input_histogram[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if (i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
/* Prints difference between buckets by using an altered histogram printing function */
void histogram_comparison(bucket *input1, bucket *input2)
{
printf("Difference Between CPU and CUDA histograms: \n");
int i;
long long total_cnt = 0;
for (i = 0; i< num_buckets; i++) {
if (i % 5 == 0) /* we print 5 buckets in a row */
printf("\n%02d: ", i);
printf("%15lld ", input2[i].d_cnt - input1[i].d_cnt);
total_cnt += input1[i].d_cnt;
/* we also want to make sure the total distance count is correct */
if (i == num_buckets - 1)
printf("\n T:%lld \n", total_cnt);
else printf("| ");
}
}
/* returns distance */
__device__ double CUDA_distance_calculator(double x1, double y1, double x2, double y2)
{
return sqrt((x1 - x2)*(x1 - x2) + (y1 - y2)*(y1 - y2));
}
//////////////////////////////////////////////////
//////////////////////////////////////////////////
/* Quad Tree Traversal */
/* Histogram Calculator */
//////////////////////////////////////////////////
//////////////////////////////////////////////////
__global__ void CUDA_Calculate_Histogram(bucket *histogram_cuda, NODE *d_NODE, int num_of_nodes, float *d_POINT_x, float *d_POINT_y, NODEID *d_POINT_nodeid, tree_path *tree, int bucket_num)
{
const unsigned long long int tid = threadIdx.x + (blockIdx.x*blockDim.x);
int *SHMOut = (int *)sharedMemory; // per-block private histogram in dynamic shared memory
int i = 0;
int j = 0;
/* the shared-memory init and the barriers below must be reached by every thread of the
block, so they sit outside the tid < PDH_acnt_CUDA guard; otherwise a partially filled
last block would hit a divergent __syncthreads() */
if (threadIdx.x == 0)
for (i = 0; i < bucket_num; i++) SHMOut[i] = 0;
__syncthreads();
if (tid < PDH_acnt_CUDA)
{
NODEID no = d_POINT_nodeid[tid]; // node id
register double x = d_POINT_x[tid]; // x coordinate
register double y = d_POINT_y[tid]; // y coordinate
NODE node = d_NODE[no]; // sets node
double distance = 0;
int h_pos = 0;
for (i = tid - node.pBuffer + 1; i < node.total; i++) // scans through current node, finds point's index and calculates histogram for all points of a higher index
{
distance = CUDA_distance_calculator(x, y, d_POINT_x[node.pBuffer + i], d_POINT_y[node.pBuffer + i]);
h_pos = (int)(distance / PDH_res_CUDA);
atomicAdd(&SHMOut[h_pos], 1);
}
for (i = no + 1; i < num_of_nodes; i++) // scans through all nodes greater than current node
{
node = d_NODE[i];
for (j = 0; j < node.total; j++) // calculates histogram for all other points
{
distance = CUDA_distance_calculator(x, y, d_POINT_x[node.pBuffer + j], d_POINT_y[node.pBuffer + j]);
h_pos = (int)(distance / PDH_res_CUDA);
atomicAdd(&SHMOut[h_pos], 1);
}
}
}
__syncthreads();
if (threadIdx.x == 0) // thread 0 merges the block-local histogram into the global one
{
for (i = 0; i < bucket_num; i++)
{
atomicAdd((unsigned long long int*) &histogram_cuda[i].d_cnt, (unsigned long long int) SHMOut[i]);
}
}
}
//////////////////////////////////////////////////
//////////////////////////////////////////////////
/* Quad Tree Kernel */
/* Sets up and Launches the CUDA kernel */
//////////////////////////////////////////////////
//////////////////////////////////////////////////
void Quad_Tree_Traversal(int num_buckets, int grid, int threads, int gpu_nodes, tree_path *tree)
{
bucket *cuda_histogram = NULL; /* Mallocs histogram in GPU */
cudaMalloc((void **)&cuda_histogram, num_buckets * sizeof(bucket));
cudaMemcpy(cuda_histogram, GPU_histogram, num_buckets * sizeof(bucket), cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(PDH_acnt_CUDA, &PDH_acnt, sizeof(signed long long)); // constant memory atom size
cudaMemcpyToSymbol(PDH_res_CUDA, &PDH_res, sizeof(double)); // constant memory width sizes
float elapsedTime = 0;
cudaEvent_t start_time, stop_time;
cudaEventCreate(&start_time);
cudaEventCreate(&stop_time);
cudaEventRecord(start_time, 0);
CUDA_Calculate_Histogram <<< grid, threads,num_buckets * sizeof(int)>>> (cuda_histogram, d_NODE, gpu_nodes, d_POINT_x, d_POINT_y, d_POINT_nodeid, tree, num_buckets);
cudaEventRecord(stop_time, 0);
cudaEventSynchronize(stop_time);
cudaEventElapsedTime(&elapsedTime, start_time, stop_time);
cudaEventDestroy(start_time);
cudaEventDestroy(stop_time);
cudaMemcpy(GPU_histogram, cuda_histogram, num_buckets * sizeof(bucket), cudaMemcpyDeviceToHost);
cudaFree(cuda_histogram);
printf("\nCUDA Kernel results:\n");
printf("Time to generate: %0.5f ms\n\n", elapsedTime);
output_histogram(GPU_histogram);
printf("\n");
histogram_comparison(CPU_histogram, GPU_histogram);
free(GPU_histogram);
}
/////////////////////////
/////////////////////////
/* End Quad Tree traversal */
/////////////////////////
/////////////////////////
/**<************************ Main function ***************************/
/**
* Two techniques to build QuadTrees
* 1- full : extend all the way down, only leaves hold points
* : counts are kept at intermediate levels
* : nulls are still used to know where points are.
* 2- adaptive : items are pushed around as needed to form tree
* : points of LIMIT pushed down.
** ******************************************************************/
int main(int argc, char **argv) {
if (argc < 4)
{
printf("you should insert the number of points, mmaximum number of points alowed in each node, and maximum number of levels alowed in the tree to the program to run\n");
return 1;
}
//number of points in the tree
unsigned long long int numberOfthreads = atoi(argv[1]);
numPoints = numberOfthreads;
//maximum number of points allowed in each node
bucketSize = atoi(argv[2]);
//maximum number of levels allowed in the tree
numLevels = atoi(argv[3]);
//maximum number of possible nodes based on the numLevels
maxNodes = ((pow(4, numLevels)) - 1) / 3;
printf("maxNodes is:%i \n", maxNodes);
PDH_res = 500;
// unsigned long long int numberOfthreads = numPoints;
cudaError_t err = cudaSetDevice(0);
float *h_POINT_x = (float *)malloc(numPoints * sizeof(float));
float *h_POINT_y = (float *)malloc(numPoints * sizeof(float));
int *h_POINT_id = (int *)malloc(numPoints * sizeof(int));
memset(h_POINT_x, 0, numPoints * sizeof(float));
memset(h_POINT_y, 0, numPoints * sizeof(float));
memset(h_POINT_id, 0, numPoints * sizeof(int));
atom_list = (atom *)malloc(sizeof(atom)*(numPoints));
long q;
srand(time(NULL));
//srand48(4);
for (q = 0; q<numPoints; q++) {
h_POINT_id[q] = q;
float x = ((float)(rand()) / RAND_MAX) * RANGE;
float y = ((float)(rand()) / RAND_MAX) * RANGE;
h_POINT_x[q] = x;
h_POINT_y[q] = y;
atom_list[q].x_pos = (double)x;
atom_list[q].y_pos = (double)y;
}
printf("start main \n");
//NODEID rootNode;
// Get memory for root node.
// Start node : root
//setNode(rootNode, 0, 0, rangeSize, rangeSize, TYPE_ROOT, 0, -1);
// Create the quadtree.
//srand48(4);
// Preallocate memory for all objects in CPU.
cudaMalloc((void**)&d_node_counter, sizeof(int)*maxNodes);
gpuErrchk(cudaPeekAtLastError());
cudaMalloc((void**)&d_split_node, sizeof(int)*maxNodes);
gpuErrchk(cudaPeekAtLastError());
cudaMalloc((void**)&d_node_id, sizeof(int)*maxNodes);
gpuErrchk(cudaPeekAtLastError());
cudaMalloc((void**)&d_level, sizeof(int)*maxNodes);
cudaMalloc((void**)&d_positions, sizeof(float2)*maxNodes);
cudaMalloc((void**)&d_query_POINT_x, sizeof(float)*numPoints);
cudaMalloc((void**)&d_query_POINT_y, sizeof(float)*numPoints);
cudaMalloc((void**)&d_point_node, sizeof(int)*numPoints);
cudaMalloc((void**)&d_query_POINT_id, sizeof(int)*numPoints);
gpuErrchk(cudaPeekAtLastError());
cudaMemset(d_node_counter, 0, sizeof(int)*maxNodes);
cudaMemset(d_split_node, 0, sizeof(int)*maxNodes);
cudaMemset(d_node_id, 0, sizeof(int)*maxNodes);
cudaMemset(d_level, 0, sizeof(int)*maxNodes);
cudaMemset(d_query_POINT_x, 0, sizeof(float)*numPoints);
cudaMemset(d_query_POINT_y, 0, sizeof(float)*numPoints);
cudaMemset(d_point_node, 0, sizeof(int)*numPoints);
cudaMemset(d_query_POINT_id, 0, sizeof(int)*numPoints);
cudaMemset(d_positions, 0, sizeof(float2)*maxNodes);
gpuErrchk(cudaPeekAtLastError());
// cudaMemcpyToSymbol(bucket_size, &bucketSize, sizeof(int), 0, cudaMemcpyHostToDevice);
cudaMemcpyToSymbol(bucket_size, &bucketSize, sizeof(int));
gpuErrchk(cudaPeekAtLastError());
cudaMemcpy(d_query_POINT_x, h_POINT_x, sizeof(float)*numPoints, cudaMemcpyHostToDevice);
cudaMemcpy(d_query_POINT_y, h_POINT_y, sizeof(float)*numPoints, cudaMemcpyHostToDevice);
cudaMemcpy(d_query_POINT_id, h_POINT_id, sizeof(int)*numPoints, cudaMemcpyHostToDevice);
gpuErrchk(cudaPeekAtLastError());
//thrust
thrust::device_ptr<int> dev_ptr(d_node_counter);
thrust::device_ptr<int> dev_split(d_split_node);
/* start counting time */
cudaEvent_t start, stop, start1, stop1;
//run the simulation
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventCreate(&start1);
cudaEventCreate(&stop1);
// err = cudaMemPrefetchAsync(d_query_POINT_x, sizeof(float)*numPoints, 0);
// err = cudaMemPrefetchAsync(d_query_POINT_y, sizeof(float)*numPoints, 0);
// err = cudaMemPrefetchAsync(d_query_POINT_id, sizeof(int)*numPoints, 0);
// err = cudaMemPrefetchAsync(d_node_counter, sizeof(int)*maxNodes , 0);
// err = cudaMemPrefetchAsync(d_split_node, sizeof(int)*maxNodes , 0);
// err = cudaMemPrefetchAsync(d_node_id, sizeof(int)*maxNodes , 0);
// err = cudaMemPrefetchAsync(d_level, sizeof(int)*maxNodes , 0);
// err = cudaMemPrefetchAsync(d_positions, sizeof(float2)*maxNodes , 0);
// err = cudaMemPrefetchAsync(d_point_node, sizeof(int)*numPoints , 0);
gpuErrchk(cudaPeekAtLastError());
float elapsedTime;
float totalBuildingTime = 0.0;
//int blocks_num = 2048000 / BLOCK_SIZE;
int blocks_num;
if (numPoints % BLOCK_SIZE == 0) {
blocks_num = numPoints / BLOCK_SIZE;
}
else {
blocks_num = numPoints / BLOCK_SIZE + 1;
}
int cuda_block_num;
if (numPoints % CUDA_BLOCK_SIZE == 0) {
cuda_block_num = numPoints / CUDA_BLOCK_SIZE;
}
else {
cuda_block_num = numPoints / CUDA_BLOCK_SIZE + 1;
}
printf("block num is: %i and cuda block num is:%i\n", blocks_num, cuda_block_num);
printf("BLOCK_SIZE is: %i \n", BLOCK_SIZE);
dim3 grid(blocks_num, 1, 1);
dim3 threads(BLOCK_SIZE, 1, 1);
printf("before calling the first kernel\n");
// printf("data point in gpu is %p \n", d_POINT+0);
//todo check the seed
//todo comment
//setup_kernel << <1, BLOCK_SIZE >> >(state, unsigned(time(NULL)) +1);
// gpuErrchk(cudaDeviceSynchronize());
// Size of quadrant
float sqrange = RANGE;
printf("sqrange is: %f \n", sqrange);
// call the setNode 0
dim3 grid0(1, 1, 1);
dim3 threads0(1, 1, 1);
cudaEventRecord(start1, 0);
setRootNodeKernel << <grid0, threads0 >> > (0.0, 0.0, d_node_counter, d_split_node, d_level, d_positions, numPoints);
cudaEventRecord(stop1, 0);
cudaEventSynchronize(stop1);
cudaEventElapsedTime(&elapsedTime, start1, stop1);
printf("******** Total Running Time of creating root= %0.5f ms \n", elapsedTime);
totalBuildingTime = totalBuildingTime + elapsedTime;
gpuErrchk(cudaDeviceSynchronize());
int level = 0;
int max_level = 10;
bool flag = true;
// int startLevelNode = pow (4, level);
// int endtLevelNode = pow (4, (level+1));
int startLevelNode = 1;
int endtLevelNode = 4;
int split = 0;
float width = float(RANGE) / 2.00;
float height = float(RANGE) / 2.00;
//for new function
int numberOfActiveThreads = 0;
int zeroCount_block_num;
dim3 threadsz(CUDA_BLOCK_SIZE, 1, 1);
int previousSplit = 0;
cudaEventRecord(start1, 0);
while (level < max_level && flag == true) {
countThePointsInPositions << <grid, threads >> > (width, height, level + 1, d_query_POINT_x, d_query_POINT_y, d_node_counter, d_split_node, d_level, numPoints, cuda_block_num, d_positions, d_point_node);
gpuErrchk(cudaDeviceSynchronize());
numberOfActiveThreads = endtLevelNode - startLevelNode + 1;
//printf("number of active threads is: %i \n ", numberOfActiveThreads);
if (numberOfActiveThreads % CUDA_BLOCK_SIZE == 0) {
zeroCount_block_num = numberOfActiveThreads / CUDA_BLOCK_SIZE;
}
else {
zeroCount_block_num = numberOfActiveThreads / CUDA_BLOCK_SIZE + 1;
}
dim3 gridz(zeroCount_block_num, 1, 1);
// cudaMemcpyToSymbol("d_zeros", &h_zeros, sizeof(int));
countTheNumberOfZeros << <gridz, threadsz >> > (d_split_node, startLevelNode, numberOfActiveThreads);
gpuErrchk(cudaDeviceSynchronize());
cudaMemcpyFromSymbol(&split, d_zeros, sizeof(unsigned int), 0, cudaMemcpyDeviceToHost);
split = split - previousSplit;
// printf("level is: %i, start is:%i, end is: %i , split:%i \n", level, startLevelNode,endtLevelNode, split );
if (split == endtLevelNode - startLevelNode + 1) {
flag = false;
}
else {
flag = true;
width = width / 2.00;
height = height / 2.00;
level = level + 1;
startLevelNode = startLevelNode + pow(4, level);
endtLevelNode = startLevelNode + pow(4, (level + 1)) - 1;
previousSplit = previousSplit + split;
}
}
cudaEventRecord(stop1, 0);
cudaEventSynchronize(stop1);
printf("after calling the kernel\n");
cudaEventElapsedTime(&elapsedTime, start1, stop1);
printf("******** Total Running Time of inserting positions is = %0.5f ms \n", elapsedTime);
totalBuildingTime = totalBuildingTime + elapsedTime;
printf("level after kernel is: %i \n", level);
int node_blocks_num;
int NODE_BLOCK_SIZE = 32;
if (maxNodes % NODE_BLOCK_SIZE == 0) {
node_blocks_num = maxNodes / NODE_BLOCK_SIZE;
}
else {
node_blocks_num = maxNodes / NODE_BLOCK_SIZE + 1;
}
printf("maxNode is: %i, node block num is: %i \n", maxNodes, node_blocks_num);
dim3 grid1(node_blocks_num, 1, 1);
dim3 threads1(NODE_BLOCK_SIZE, 1, 1);
cudaEventRecord(start, 0);
countTheOnesInCounterArray << <grid1, threads1 >> > (d_node_counter, maxNodes);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
gpuErrchk(cudaDeviceSynchronize());
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("******** cont the number of ones in counter array = %0.5f ms \n", elapsedTime);
cudaMemcpyFromSymbol(&h_counter_one, d_counter_one, sizeof(unsigned int), 0, cudaMemcpyDeviceToHost);
printf("ones are: %i non empty nodes \n", h_counter_one);
int gpu_nodes = h_counter_one;
totalBuildingTime = totalBuildingTime + elapsedTime;
cudaEventRecord(start, 0);
countTheNonLeafNodes << <grid1, threads1 >> > (d_split_node, maxNodes);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
gpuErrchk(cudaDeviceSynchronize());
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("******** cont the number of zeros in split array = %0.5f ms \n", elapsedTime);
cudaMemcpyFromSymbol(&h_split_array_zero, d_split_array_zero, sizeof(unsigned int), 0, cudaMemcpyDeviceToHost);
printf("h_split_zero:%i and non_split_zero :%i and leaves: %i\n", h_split_array_zero, (maxNodes - h_split_array_zero), (gpu_nodes - (maxNodes - h_split_array_zero)));
totalBuildingTime = totalBuildingTime + elapsedTime;
split = h_split_array_zero;
int non_split_zero = maxNodes - split;
//total number of leaves
int leaves = gpu_nodes - non_split_zero;
printf("number os split is: %i and number of leaves is: %i\n", non_split_zero, leaves);
// int sum = 0;
// sum= thrust::count(thrust::device, dev_ptr, (dev_ptr + maxNodes) , 0);
// //total number of nodes
// int gpu_nodes= maxNodes - sum ;
// printf("zero is:%i and nodes is: %i \n", sum, gpu_nodes);
//
// split= thrust::count(thrust::device, dev_split, (dev_split + maxNodes) , 0);
// //non leaf nodes
// int non_split_zero= maxNodes - split;
// //total number of leaves
// int leaves = gpu_nodes - non_split_zero;
// printf("number os split is: %i and number of leaves is: %i\n", non_split_zero, leaves);
// int numberOfNodes;
cudaMalloc((void**)&d_NODE, sizeof(NODE)*gpu_nodes);
cudaMemset(d_NODE, 0, sizeof(NODE)*gpu_nodes);
cudaMalloc((void**)&d_POINT_x, sizeof(float)*numPoints);
cudaMalloc((void**)&d_POINT_y, sizeof(float)*numPoints);
cudaMalloc((void**)&d_POINT_nodeid, sizeof(NODEID)*numPoints);
cudaMemset(d_POINT_nodeid, 0, sizeof(NODEID)*numPoints);
cudaMemset(d_POINT_x, 0, sizeof(float)*numPoints);
cudaMemset(d_POINT_y, 0, sizeof(float)*numPoints);
cudaMalloc((void**)&d_POINT_id, sizeof(int)*numPoints);
cudaMemset(d_POINT_id, 0, sizeof(int)*numPoints);
cudaMalloc((void**)&d_leave_list, sizeof(int)*leaves);
cudaMemset(d_leave_list, 0, sizeof(int)*leaves);
cudaMalloc((void**)&leaf_m_address, sizeof(unsigned long long int));
cudaMemset(leaf_m_address, 0, sizeof(unsigned long long int));
gpuErrchk(cudaPeekAtLastError());
//create root Node
cudaEventRecord(start, 0);
createRootNodeKernel << <grid0, threads0 >> > (0.0, 0.0, sqrange, sqrange, d_NODE, d_node_id);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
gpuErrchk(cudaDeviceSynchronize());
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("******** Total Running Time of creating root node = %0.5f ms \n", elapsedTime);
totalBuildingTime = totalBuildingTime + elapsedTime;
//create nodes and allocate memory for that
cudaEventRecord(start, 0);
createParentNodesKernel << <grid1, threads1 >> > (0.0, 0.0, sqrange, sqrange, d_NODE, d_node_counter, d_split_node, maxNodes, d_node_id, d_level, d_positions);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
gpuErrchk(cudaDeviceSynchronize());
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("******** Total Running Time of setting node kernel = %0.5f ms \n", elapsedTime);
totalBuildingTime = totalBuildingTime + elapsedTime;
cudaMemcpyFromSymbol(&h_node_allocate, d_node_allocate, sizeof(unsigned int), 0, cudaMemcpyDeviceToHost);
printf("number of allocated nodes is: %i \n", h_node_allocate);
// err = cudaMemPrefetchAsync(d_NODE, sizeof(NODE)*gpu_nodes , 0);
// err = cudaMemPrefetchAsync(d_leave_list, sizeof(int)*leaves , 0);
//set the links between children and parents
cudaEventRecord(start, 0);
finalNodesSetUpKernel << <grid1, threads1 >> > (d_NODE, d_node_counter, d_split_node, maxNodes, d_node_id, d_leave_list, leaf_m_address);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
gpuErrchk(cudaDeviceSynchronize());
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("******** Total Running Time of final node kernel = %0.5f ms \n", elapsedTime);
totalBuildingTime = totalBuildingTime + elapsedTime;
cudaMemcpyFromSymbol(&h_point_allocate, d_point_allocate, sizeof(unsigned int), 0, cudaMemcpyDeviceToHost);
printf("number of allocated points is: %i number of point is:%i\n", h_point_allocate, numPoints);
int insert_block_num;
if (numPoints % INSERT_BLOCK_SIZE == 0) {
insert_block_num = numPoints / INSERT_BLOCK_SIZE;
}
else {
insert_block_num = numPoints / INSERT_BLOCK_SIZE + 1;
}
dim3 grid3(insert_block_num, 1, 1);
dim3 threads3(INSERT_BLOCK_SIZE, 1, 1);
// err = cudaMemPrefetchAsync(d_POINT_x, sizeof(float)*numPoints , 0);
// err = cudaMemPrefetchAsync(d_POINT_y, sizeof(float)*numPoints , 0);
// err = cudaMemPrefetchAsync(d_POINT_id, sizeof(int)*numPoints , 0);
//insert into leaf nodes
cudaEventRecord(start, 0);
insertIntoLeafNodes << <grid3, threads3 >> > (d_node_id, d_query_POINT_x, d_query_POINT_y, d_query_POINT_id, d_NODE, d_POINT_x, d_POINT_y, d_POINT_id, d_point_node, numPoints, d_POINT_nodeid);
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
gpuErrchk(cudaDeviceSynchronize());
cudaEventElapsedTime(&elapsedTime, start, stop);
printf("******** Total Running Time of inserting to leaves = %0.5f ms \n", elapsedTime);
totalBuildingTime = totalBuildingTime + elapsedTime;
printf("********** total tree construction time = %0.5f ms \n", totalBuildingTime);
/////////////////////////////////////////////////
// sets up the CPU histogram //
/////////////////////////////////////////////////
PDH_acnt = numPoints;
if (argc > 4)
{
PDH_res = atof(argv[4]);
}
num_buckets = (int)(RANGE * 1.732 / PDH_res) + 1;
CPU_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
GPU_histogram = (bucket *)malloc(sizeof(bucket)*num_buckets);
/* sets CPU and GPU histograms to zero */
int z = 0;
for (z = 0; z < num_buckets; z++)
{
CPU_histogram[z].d_cnt = 0;
GPU_histogram[z].d_cnt = 0;
}
/*
for (z = 0; z < numPoints; z++)
{
printf("\n(%f,%f) %i", atom_list[z].x_pos, atom_list[z].y_pos, z);
}
*/
gettimeofday(&startTime, &Idunno);
PDH_baseline();
report_running_time();
printf("\nCPU results:\n");
output_histogram(CPU_histogram);
/////////////////////////////////////////////////
// end of the CPU histogram //
/////////////////////////////////////////////////
cudaFree(d_level);
cudaFree(d_node_counter);
cudaFree(d_split_node);
cudaFree(d_positions);
cudaFree(d_query_POINT_x);
cudaFree(d_query_POINT_y);
cudaFree(d_query_POINT_id);
cudaFree(d_leave_list);
cudaFree(leaf_m_address);
cudaFree(d_node_id);
cudaFree(d_point_node);
cudaFree(d_POINT_id);
cudaEventDestroy(start);
cudaEventDestroy(stop);
cudaEventDestroy(start1);
cudaEventDestroy(stop1);
free(h_POINT_x);
free(h_POINT_y);
free(h_POINT_id);
tree_path *tree;
cudaMalloc((void**)&tree, sizeof(tree_path)*gpu_nodes);
CUDA_RebuildTree << <1, 1, 1 >> >(d_NODE, gpu_nodes, tree);
int cuda_block_size = 128;
if (argc > 5)
{
cuda_block_size = atoi(argv[5]);
}
int cuda_block_number = ceil(PDH_acnt / cuda_block_size) + 1;
Quad_Tree_Traversal(num_buckets, cuda_block_number, cuda_block_size, gpu_nodes, tree);
free(CPU_histogram);
free(atom_list);
cudaFree(d_NODE);
cudaFree(d_POINT_x);
cudaFree(d_POINT_y);
cudaFree(d_POINT_nodeid);
cudaFree(tree);
return 0;
} |
4889d41b94bc8475598ebe2012af150faa74fdf0.hip | // !!! This is a file automatically generated by hipify!!!
/*
count the number of matching tuples in each partition and each thread
*/
#include <stdio.h>
#include <stdint.h>
#include <hip/hip_runtime.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__
void count_partitioning(
TUPLE *t,
int *L,
int p_num,
int t_num,
int rows_num
)
{
int rows_n = rows_num;
int p_n = p_num;
int t_n = t_num;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int Dim = 0;
if(gridDim.x-1 == blockIdx.x){
Dim = t_n - blockIdx.x*blockDim.x;
}else{
Dim = blockDim.x;
}
// Matching phase
int hash = 0;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){
hash = t[DEF + threadIdx.x + i*Dim].val % p_n;
L[hash*t_n + x]++;
}
}
}
__global__
void partitioning(
TUPLE *t,
TUPLE *pt,
int *L,
int p_num,
int t_num,
int rows_num
)
{
int p_n = p_num;
int t_n = t_num;
int rows_n = rows_num;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int Dim = 0;
if(gridDim.x-1 == blockIdx.x){
Dim = t_n - blockIdx.x*blockDim.x;
}else{
Dim = blockDim.x;
}
// Matching phase
int hash = 0;
int temp = 0;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){
hash = t[DEF + threadIdx.x + i*Dim].val%p_n;
temp = L[hash*t_n + x];
pt[temp].key = t[DEF + threadIdx.x + i*Dim].key;
pt[temp].val = t[DEF + threadIdx.x + i*Dim].val;
L[hash*t_n + x] = temp + 1;
//printf("i = %d\tloc = %d\tt = %d\n",hash*t_num + x,L[hash*t_num + x],t[x*PER_TH + i].val);
}
}
}
}
| 4889d41b94bc8475598ebe2012af150faa74fdf0.cu | /*
count the number of matching tuples in each partition and each thread (see the host-side driver sketch after the kernels)
*/
#include <stdio.h>
#include <stdint.h>
#include <cuda.h>
#include <sys/time.h>
#include "tuple.h"
extern "C" {
__global__
void count_partitioning(
TUPLE *t,
int *L,
int p_num,
int t_num,
int rows_num
)
{
int rows_n = rows_num;
int p_n = p_num;
int t_n = t_num;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int Dim = 0;
if(gridDim.x-1 == blockIdx.x){
Dim = t_n - blockIdx.x*blockDim.x;
}else{
Dim = blockDim.x;
}
// Matching phase
int hash = 0;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){
hash = t[DEF + threadIdx.x + i*Dim].val % p_n;
L[hash*t_n + x]++;
}
}
}
__global__
void partitioning(
TUPLE *t,
TUPLE *pt,
int *L,
int p_num,
int t_num,
int rows_num
)
{
int p_n = p_num;
int t_n = t_num;
int rows_n = rows_num;
int DEF = blockIdx.x * blockDim.x * PER_TH;
int x = blockIdx.x * blockDim.x + threadIdx.x;
int Dim = 0;
if(gridDim.x-1 == blockIdx.x){
Dim = t_n - blockIdx.x*blockDim.x;
}else{
Dim = blockDim.x;
}
// Matching phase
int hash = 0;
int temp = 0;
if(x < t_n){
for(int i = 0; i<PER_TH&&(DEF+threadIdx.x+i*Dim)<rows_n;i++){
hash = t[DEF + threadIdx.x + i*Dim].val%p_n;
temp = L[hash*t_n + x];
pt[temp].key = t[DEF + threadIdx.x + i*Dim].key;
pt[temp].val = t[DEF + threadIdx.x + i*Dim].val;
L[hash*t_n + x] = temp + 1;
//printf("i = %d\tloc = %d\tt = %d\n",hash*t_num + x,L[hash*t_num + x],t[x*PER_TH + i].val);
}
}
}
}
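/*
 Hedged host-side sketch (added for illustration; not from the original source):
 one plausible way to drive the two kernels above. count_partitioning fills L
 with one counter per (partition, thread) pair, an exclusive prefix sum turns
 those counts into write offsets, and partitioning then scatters the tuples.
 The block size and the assumption that t_num is the total number of worker
 threads (ceil(rows / PER_TH)) are illustrative guesses.
*/
static void partition_tuples_sketch(TUPLE *d_t, TUPLE *d_pt, int p_num, int rows)
{
  const int block = 256;
  const int t_num = (rows + PER_TH - 1) / PER_TH; // one thread handles PER_TH rows
  const int grid = (t_num + block - 1) / block;
  const int counters = p_num * t_num; // one write cursor per (partition, thread)
  int *d_L;
  cudaMalloc((void **)&d_L, counters * sizeof(int));
  cudaMemset(d_L, 0, counters * sizeof(int));
  count_partitioning<<<grid, block>>>(d_t, d_L, p_num, t_num, rows);
  // exclusive prefix sum of the counters (done on the host for clarity)
  int *h_L = new int[counters];
  cudaMemcpy(h_L, d_L, counters * sizeof(int), cudaMemcpyDeviceToHost);
  int running = 0;
  for (int i = 0; i < counters; i++) { int c = h_L[i]; h_L[i] = running; running += c; }
  cudaMemcpy(d_L, h_L, counters * sizeof(int), cudaMemcpyHostToDevice);
  delete [] h_L;
  partitioning<<<grid, block>>>(d_t, d_pt, d_L, p_num, t_num, rows);
  cudaFree(d_L);
}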
|
f91faa424d2611687e4c2a41afd7c2d6ce28028b.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <cfloat>
#include <thrust/sort.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
#include "nms_op.h"
namespace caffe2 {
namespace {
__device__ void _copy(const int size, const float* source, float* target) {
for (int i=0; i<size; i++) {
*(target++) = *(source++);
}
}
__global__ void GetValuesAndIndices(const int nthreads,
const float* boxes,
float* values,
TIndex* indices) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
values[index] = boxes[index * 8 + 4];
indices[index] = static_cast<TIndex>(index);
}
}
__global__ void ComputeOverlapping(const int nthreads,
const float* input_boxes,
const TIndex* indices,
const int num_total,
float* overlaps) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int idA = index / num_total;
int idB = index % num_total;
// if idA < idB, then the score should be higher
if (idA < idB) {
const int iidA = static_cast<int>(indices[idA]) * 8;
const int iidB = static_cast<int>(indices[idB]) * 8;
const float x1A = input_boxes[iidA];
const float y1A = input_boxes[iidA+1];
const float x2A = input_boxes[iidA+2];
const float y2A = input_boxes[iidA+3];
const float areaA = input_boxes[iidA+5];
const float x1B = input_boxes[iidB];
const float y1B = input_boxes[iidB+1];
const float x2B = input_boxes[iidB+2];
const float y2B = input_boxes[iidB+3];
const float areaB = input_boxes[iidB+5];
const float xx1 = (x1A > x1B) ? x1A : x1B;
const float yy1 = (y1A > y1B) ? y1A : y1B;
const float xx2 = (x2A < x2B) ? x2A : x2B;
const float yy2 = (y2A < y2B) ? y2A : y2B;
float w = xx2 - xx1 + 1.;
w = (w > 0.) ? w : 0.;
float h = yy2 - yy1 + 1.;
h = (h > 0.) ? h : 0.;
const float inter = w * h;
overlaps[idA * num_total + idB] = inter / (areaA + areaB - inter);
}
}
}
__global__ void NMSForward(const int nthreads,
const float* overlaps,
const TIndex* indices,
const int num_total,
const float threshold,
const int top_n,
float* output_boxes,
int* output_index,
int* cnt) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
*cnt = 0;
for (int i=0; i<num_total; i++) {
const int id = static_cast<int>(indices[i]);
// make sure we will change for every box
if (output_boxes[id * 8 + 7] < 1.) {
for (int j=i+1; j<num_total; j++) {
if (overlaps[i * num_total + j] >= threshold) {
const int jd = static_cast<int>(indices[j]);
output_boxes[jd * 8 + 7] = 1.;
}
}
// should be the actual index
output_index[(*cnt)] = id;
(*cnt)++;
}
// enough boxes, still assign box
if ((*cnt) == top_n) {
for (int j=i+1; j<num_total; j++) {
const int jd = static_cast<int>(indices[j]);
output_boxes[jd * 8 + 7] = 1.;
}
break;
}
}
}
}
__global__ void CopyBoxes(const int nthreads,
const float* boxes,
float* output_boxes) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
const int bid = i * 8;
const int oid = i * 6;
_copy(5, boxes + bid, output_boxes + oid);
output_boxes[oid+5] = boxes[bid+6];
}
}
__global__ void NMSReduceBoxes(const int nthreads,
const float* boxes,
const int* index,
float* output_boxes) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
const int id = index[i];
const int bid = id * 8;
const int oid = i * 6;
_copy(5, boxes + bid, output_boxes + oid);
output_boxes[oid+5] = boxes[bid+6];
}
}
__global__ void NMSReduceFeats(const int nthreads,
const float* feats,
const int* index,
const int num_feat,
float* output_feats) {
CUDA_1D_KERNEL_LOOP(ii, nthreads) {
const int j = ii % num_feat;
const int i = ii / num_feat;
const int id = index[i];
output_feats[ii] = feats[id * num_feat + j];
}
}
} // namespace
template<>
bool NMSOp<float, CUDAContext>::RunOnDevice() {
auto& boxes = Input(0);
DCHECK_EQ(boxes.dim32(1), 8);
const int num_total = boxes.dim32(0);
auto& feats = Input(1);
DCHECK_EQ(feats.dim32(0), num_total);
const int num_feat = feats.dim32(1);
// handle the empty case
if (num_total == 0) {
Output(0)->Resize(0, 6);
Output(0)->mutable_data<float>();
Output(1)->Resize(0, num_feat);
Output(1)->mutable_data<float>();
return true;
} else if (num_total == 1) {
auto* output_boxes = Output(0);
auto* output_feats = Output(1);
output_boxes->Resize(1, 6);
output_feats->Resize(1, num_feat);
hipLaunchKernelGGL(( CopyBoxes), dim3(CAFFE_GET_BLOCKS(1)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(), 1,
boxes.data<float>(),
output_boxes->mutable_data<float>());
context_.Copy<float, CUDAContext, CUDAContext>(num_feat, feats.data<float>(),
output_feats->mutable_data<float>());
return true;
}
const int num_pair = num_total * num_total;
const float* boxes_pointer = boxes.data<float>();
const float* feats_pointer = feats.data<float>();
values.Resize(num_total);
indices.Resize(num_total);
float* values_pointer = values.mutable_data<float>();
TIndex* indices_pointer = indices.mutable_data<TIndex>();
hipLaunchKernelGGL(( GetValuesAndIndices), dim3(CAFFE_GET_BLOCKS(num_total)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(), num_total,
boxes_pointer,
values_pointer,
indices_pointer);
// sort the value and get the indexes
thrust::sort_by_key(thrust::hip::par.on(context_.cuda_stream()),
values_pointer,
values_pointer + num_total,
indices_pointer,
thrust::greater<float>());
// pairwise comparison
overlaps.Resize(num_total, num_total);
float* overlaps_pointer = overlaps.mutable_data<float>();
// initialize everything
math::Set<float, CUDAContext>(num_pair, 0., overlaps_pointer, &context_);
hipLaunchKernelGGL(( ComputeOverlapping), dim3(CAFFE_GET_BLOCKS(num_pair)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(), num_pair,
boxes_pointer,
indices_pointer,
num_total,
overlaps_pointer);
// then just reduce by setting up the index
middle.ResizeLike(boxes);
float* middle_pointer = middle.mutable_data<float>();
context_.Copy<float, CUDAContext, CUDAContext>(num_total * 8, boxes_pointer,
middle_pointer);
// also remember the index
mindex.Resize(num_total);
int* mindex_pointer = mindex.mutable_data<int>();
math::Set<int, CUDAContext>(num_total, -1, mindex_pointer, &context_);
mcounter.Resize(1);
int* mcounter_pointer = mcounter.mutable_data<int>();
// using one thread to go down the list
hipLaunchKernelGGL(( NMSForward), dim3(CAFFE_GET_BLOCKS(1)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(), 1,
overlaps_pointer,
indices_pointer,
num_total,
nms_,
dpi_,
middle_pointer,
mindex_pointer,
mcounter_pointer);
// get the counter value
int num_reduced;
context_.Copy<int, CUDAContext, CPUContext>(1, mcounter_pointer, &num_reduced);
// then only copy the valid results
auto* out_boxes = Output(0);
out_boxes->Resize(num_reduced, 6);
float* out_boxes_pointer = out_boxes->mutable_data<float>();
hipLaunchKernelGGL(( NMSReduceBoxes), dim3(CAFFE_GET_BLOCKS(num_reduced)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(), num_reduced,
middle_pointer,
mindex_pointer,
out_boxes_pointer);
auto* out_feats = Output(1);
out_feats->Resize(num_reduced, num_feat);
float* out_feats_pointer = out_feats->mutable_data<float>();
const int num_reduced_feats = num_feat * num_reduced;
hipLaunchKernelGGL(( NMSReduceFeats), dim3(CAFFE_GET_BLOCKS(num_reduced_feats)), dim3(CAFFE_CUDA_NUM_THREADS),
0, context_.cuda_stream(), num_reduced_feats,
feats_pointer,
mindex_pointer,
num_feat,
out_feats_pointer);
return true;
}
REGISTER_CUDA_OPERATOR(NMS,
NMSOp<float, CUDAContext>);
} // namespace caffe2 | f91faa424d2611687e4c2a41afd7c2d6ce28028b.cu | #include <cfloat>
#include <thrust/sort.h>
#include "caffe2/core/context_gpu.h"
#include "caffe2/utils/math.h"
#include "nms_op.h"
namespace caffe2 {
namespace {
__device__ void _copy(const int size, const float* source, float* target) {
for (int i=0; i<size; i++) {
*(target++) = *(source++);
}
}
__global__ void GetValuesAndIndices(const int nthreads,
const float* boxes,
float* values,
TIndex* indices) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
values[index] = boxes[index * 8 + 4];
indices[index] = static_cast<TIndex>(index);
}
}
__global__ void ComputeOverlapping(const int nthreads,
const float* input_boxes,
const TIndex* indices,
const int num_total,
float* overlaps) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
int idA = index / num_total;
int idB = index % num_total;
// if idA < idB, then the score should be higher
if (idA < idB) {
const int iidA = static_cast<int>(indices[idA]) * 8;
const int iidB = static_cast<int>(indices[idB]) * 8;
const float x1A = input_boxes[iidA];
const float y1A = input_boxes[iidA+1];
const float x2A = input_boxes[iidA+2];
const float y2A = input_boxes[iidA+3];
const float areaA = input_boxes[iidA+5];
const float x1B = input_boxes[iidB];
const float y1B = input_boxes[iidB+1];
const float x2B = input_boxes[iidB+2];
const float y2B = input_boxes[iidB+3];
const float areaB = input_boxes[iidB+5];
const float xx1 = (x1A > x1B) ? x1A : x1B;
const float yy1 = (y1A > y1B) ? y1A : y1B;
const float xx2 = (x2A < x2B) ? x2A : x2B;
const float yy2 = (y2A < y2B) ? y2A : y2B;
float w = xx2 - xx1 + 1.;
w = (w > 0.) ? w : 0.;
float h = yy2 - yy1 + 1.;
h = (h > 0.) ? h : 0.;
const float inter = w * h;
overlaps[idA * num_total + idB] = inter / (areaA + areaB - inter);
}
}
}
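// Added illustration (not part of the original operator): the overlap above is
// the usual IoU with the "+1" pixel convention, assuming the cached areas in
// column 5 were computed the same way. Worked example: A=(0,0,9,9) and
// B=(5,0,14,9) are both 10x10 boxes; the intersection is 5*10=50, so
// IoU = 50 / (100 + 100 - 50) = 1/3.
inline float iou_reference(float x1A, float y1A, float x2A, float y2A,
                           float x1B, float y1B, float x2B, float y2B) {
  const float areaA = (x2A - x1A + 1.f) * (y2A - y1A + 1.f);
  const float areaB = (x2B - x1B + 1.f) * (y2B - y1B + 1.f);
  const float xx1 = (x1A > x1B) ? x1A : x1B;
  const float yy1 = (y1A > y1B) ? y1A : y1B;
  const float xx2 = (x2A < x2B) ? x2A : x2B;
  const float yy2 = (y2A < y2B) ? y2A : y2B;
  const float w = (xx2 - xx1 + 1.f > 0.f) ? (xx2 - xx1 + 1.f) : 0.f;
  const float h = (yy2 - yy1 + 1.f > 0.f) ? (yy2 - yy1 + 1.f) : 0.f;
  const float inter = w * h;
  return inter / (areaA + areaB - inter);
}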
__global__ void NMSForward(const int nthreads,
const float* overlaps,
const TIndex* indices,
const int num_total,
const float threshold,
const int top_n,
float* output_boxes,
int* output_index,
int* cnt) {
CUDA_1D_KERNEL_LOOP(index, nthreads) {
*cnt = 0;
for (int i=0; i<num_total; i++) {
const int id = static_cast<int>(indices[i]);
// make sure we will change for every box
if (output_boxes[id * 8 + 7] < 1.) {
for (int j=i+1; j<num_total; j++) {
if (overlaps[i * num_total + j] >= threshold) {
const int jd = static_cast<int>(indices[j]);
output_boxes[jd * 8 + 7] = 1.;
}
}
// should be the actual index
output_index[(*cnt)] = id;
(*cnt)++;
}
// enough boxes, still assign box
if ((*cnt) == top_n) {
for (int j=i+1; j<num_total; j++) {
const int jd = static_cast<int>(indices[j]);
output_boxes[jd * 8 + 7] = 1.;
}
break;
}
}
}
}
__global__ void CopyBoxes(const int nthreads,
const float* boxes,
float* output_boxes) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
const int bid = i * 8;
const int oid = i * 6;
_copy(5, boxes + bid, output_boxes + oid);
output_boxes[oid+5] = boxes[bid+6];
}
}
__global__ void NMSReduceBoxes(const int nthreads,
const float* boxes,
const int* index,
float* output_boxes) {
CUDA_1D_KERNEL_LOOP(i, nthreads) {
const int id = index[i];
const int bid = id * 8;
const int oid = i * 6;
_copy(5, boxes + bid, output_boxes + oid);
output_boxes[oid+5] = boxes[bid+6];
}
}
__global__ void NMSReduceFeats(const int nthreads,
const float* feats,
const int* index,
const int num_feat,
float* output_feats) {
CUDA_1D_KERNEL_LOOP(ii, nthreads) {
const int j = ii % num_feat;
const int i = ii / num_feat;
const int id = index[i];
output_feats[ii] = feats[id * num_feat + j];
}
}
} // namespace
template<>
bool NMSOp<float, CUDAContext>::RunOnDevice() {
auto& boxes = Input(0);
DCHECK_EQ(boxes.dim32(1), 8);
const int num_total = boxes.dim32(0);
auto& feats = Input(1);
DCHECK_EQ(feats.dim32(0), num_total);
const int num_feat = feats.dim32(1);
// handle the empty case
if (num_total == 0) {
Output(0)->Resize(0, 6);
Output(0)->mutable_data<float>();
Output(1)->Resize(0, num_feat);
Output(1)->mutable_data<float>();
return true;
} else if (num_total == 1) {
auto* output_boxes = Output(0);
auto* output_feats = Output(1);
output_boxes->Resize(1, 6);
output_feats->Resize(1, num_feat);
CopyBoxes<<<CAFFE_GET_BLOCKS(1), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(1,
boxes.data<float>(),
output_boxes->mutable_data<float>());
context_.Copy<float, CUDAContext, CUDAContext>(num_feat, feats.data<float>(),
output_feats->mutable_data<float>());
return true;
}
const int num_pair = num_total * num_total;
const float* boxes_pointer = boxes.data<float>();
const float* feats_pointer = feats.data<float>();
values.Resize(num_total);
indices.Resize(num_total);
float* values_pointer = values.mutable_data<float>();
TIndex* indices_pointer = indices.mutable_data<TIndex>();
GetValuesAndIndices<<<CAFFE_GET_BLOCKS(num_total), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(num_total,
boxes_pointer,
values_pointer,
indices_pointer);
// sort the value and get the indexes
thrust::sort_by_key(thrust::cuda::par.on(context_.cuda_stream()),
values_pointer,
values_pointer + num_total,
indices_pointer,
thrust::greater<float>());
// pairwise comparison
overlaps.Resize(num_total, num_total);
float* overlaps_pointer = overlaps.mutable_data<float>();
// initialize everything
math::Set<float, CUDAContext>(num_pair, 0., overlaps_pointer, &context_);
ComputeOverlapping<<<CAFFE_GET_BLOCKS(num_pair), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(num_pair,
boxes_pointer,
indices_pointer,
num_total,
overlaps_pointer);
// then just reduce by setting up the index
middle.ResizeLike(boxes);
float* middle_pointer = middle.mutable_data<float>();
context_.Copy<float, CUDAContext, CUDAContext>(num_total * 8, boxes_pointer,
middle_pointer);
// also remember the index
mindex.Resize(num_total);
int* mindex_pointer = mindex.mutable_data<int>();
math::Set<int, CUDAContext>(num_total, -1, mindex_pointer, &context_);
mcounter.Resize(1);
int* mcounter_pointer = mcounter.mutable_data<int>();
// using one thread to go down the list
NMSForward<<<CAFFE_GET_BLOCKS(1), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(1,
overlaps_pointer,
indices_pointer,
num_total,
nms_,
dpi_,
middle_pointer,
mindex_pointer,
mcounter_pointer);
// get the counter value
int num_reduced;
context_.Copy<int, CUDAContext, CPUContext>(1, mcounter_pointer, &num_reduced);
// then only copy the valid results
auto* out_boxes = Output(0);
out_boxes->Resize(num_reduced, 6);
float* out_boxes_pointer = out_boxes->mutable_data<float>();
NMSReduceBoxes<<<CAFFE_GET_BLOCKS(num_reduced), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(num_reduced,
middle_pointer,
mindex_pointer,
out_boxes_pointer);
auto* out_feats = Output(1);
out_feats->Resize(num_reduced, num_feat);
float* out_feats_pointer = out_feats->mutable_data<float>();
const int num_reduced_feats = num_feat * num_reduced;
NMSReduceFeats<<<CAFFE_GET_BLOCKS(num_reduced_feats), CAFFE_CUDA_NUM_THREADS,
0, context_.cuda_stream()>>>(num_reduced_feats,
feats_pointer,
mindex_pointer,
num_feat,
out_feats_pointer);
return true;
}
REGISTER_CUDA_OPERATOR(NMS,
NMSOp<float, CUDAContext>);
} // namespace caffe2 |
c2468a5e48c0fab5243c95bcb4fe7a9141bd79af.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <iostream>
#include <hiprand/hiprand_kernel.h>
#include <float.h>
#include "vec3.h"
#include "ray.h"
#include "hitable_list.h"
#include "sphere.h"
#include "camera.h"
#include "material.h"
#include <chrono>
#include <fstream>
#include <sstream>
#define num_hitables 488
using namespace std::chrono;
__device__ void random_scene(hitable **d_list) {
hiprandState_t local_rand_state;
hiprand_init((unsigned long long)clock64(), (unsigned long long)0, 0, &local_rand_state);
d_list[0] = new sphere(vec3(0,-1000.0,-1), 1000, new lambertian(vec3(0.5, 0.5, 0.5)));
int i = 1;
for(int a = -11; a < 11; a++) {
for(int b = -11; b < 11; b++) {
float choose_mat = hiprand_uniform(&local_rand_state);
vec3 center(a+hiprand_uniform(&local_rand_state),0.2,b+hiprand_uniform(&local_rand_state));
if(choose_mat < 0.8f)
d_list[i++] = new sphere(center, 0.2, new lambertian(vec3(hiprand_uniform(&local_rand_state)*hiprand_uniform(&local_rand_state),
hiprand_uniform(&local_rand_state)*hiprand_uniform(&local_rand_state),
hiprand_uniform(&local_rand_state)*hiprand_uniform(&local_rand_state))));
else if(choose_mat < 0.95f)
d_list[i++] = new sphere(center, 0.2, new metal(vec3(0.5f*(1.0f+hiprand_uniform(&local_rand_state)),
0.5f*(1.0f+hiprand_uniform(&local_rand_state)),
0.5f*(1.0f+hiprand_uniform(&local_rand_state))),
0.5f*hiprand_uniform(&local_rand_state)));
else d_list[i++] = new sphere(center, 0.2, new dielectric(1.5));
}
}
d_list[i++] = new sphere(vec3(0, 1,0), 1.0, new dielectric(1.5));
d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1)));
d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0));
}
__global__ void create_scene(hitable **d_list, hitable **d_world, camera **d_camera, int nx, int ny) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
random_scene(d_list);
*d_world = new hitable_list(d_list, num_hitables);
vec3 lookfrom(13,2,3);
vec3 lookat(0,0,0);
*d_camera = new camera(lookfrom, lookat, vec3(0,1,0), 30.0, float(nx)/float(ny), 0.1, (lookfrom-lookat).length());
}
}
__global__ void free_scene(hitable **d_list, hitable **d_world, camera **d_camera) {
for(int i=0; i < 22*22+1+3; i++) delete d_list[i];
delete *d_world;
delete *d_camera;
}
__device__ vec3 color(const ray& r, hitable **world, hiprandState_t *rand_state) {
ray cur_ray = r;
vec3 cur_attenuation = vec3(1.0,1.0,1.0);
for(int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) {
ray scattered;
vec3 attenuation;
if(rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, rand_state)) {
cur_attenuation *= attenuation;
cur_ray = scattered;
}
else {
return vec3(0.0,0.0,0.0);
}
}
else {
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f*(unit_direction.y() + 1.0f);
vec3 c = (1.0f-t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
}
return vec3(0.0,0.0,0.0);
}
__global__ void render(vec3 *img, int nx, int ny, int ns, hitable **world, camera **cam) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= nx) || (j >= ny)) return;
int pixel_index = j * nx + i;
hiprandState_t state;
hiprand_init((unsigned long long)clock64() + pixel_index, (unsigned long long)0, 0, &state);
vec3 col(0,0,0);
for(int s=0; s<ns; s++){
float u = float(i + hiprand_uniform(&state)) / float(nx);
float v = float(j + hiprand_uniform(&state)) / float(ny);
ray r = (*cam)->get_ray(u, v, &state);
col += color(r, world, &state);
}
col /= float(ns);
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
img[pixel_index] = 255.99 * col;
}
int main() {
int nx, ny, ns;
int tx = 8;
int ty = 8;
std::cout << "nx: ";
std::cin >> nx;
std::cout << "ny: ";
std::cin >> ny;
std::cout << "ns: ";
std::cin >> ns;
dim3 blocks(nx/tx+1,ny/ty+1);
dim3 threads(tx,ty);
//Alloc Memory
vec3 *img;
hipMallocManaged(&img, nx*ny*sizeof(vec3));
hitable **list, **world;
hipMalloc(&list, num_hitables*sizeof(hitable *));
hipMalloc(&world, sizeof(hitable *));
camera **cam;
hipMalloc(&cam, sizeof(camera *));
auto start = high_resolution_clock::now();
//GPU Process
hipLaunchKernelGGL(( create_scene), dim3(1),dim3(1), 0, 0, list, world, cam, nx, ny);
hipDeviceSynchronize();
hipLaunchKernelGGL(( render), dim3(blocks), dim3(threads), 0, 0, img, nx, ny, ns, world, cam);
hipDeviceSynchronize();
//Generate image
std::cerr << "Rendering Image: " << nx << "x" << ny << std::endl;
std::ofstream myfile;
myfile.open("Image.ppm");
myfile << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny-1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j*nx + i;
int ir = int(img[pixel_index].r());
int ig = int(img[pixel_index].g());
int ib = int(img[pixel_index].b());
myfile << ir << " " << ig << " " << ib << std::endl;
}
}
myfile.close();
auto end_time = duration_cast<duration<double>>(high_resolution_clock::now() - start).count();
std::cout << "Tempo de Execuo: " << end_time << " segundos" << std::endl ;
//Free Components
hipDeviceSynchronize();
hipLaunchKernelGGL(( free_scene), dim3(1),dim3(1), 0, 0, list, world, cam);
void* freeList[4] = {cam, world, list, img};
for(int i=0; i<4; i++) hipFree(freeList[i]);
hipDeviceReset();
} | c2468a5e48c0fab5243c95bcb4fe7a9141bd79af.cu | #include <iostream>
#include <curand_kernel.h>
#include <float.h>
#include "vec3.h"
#include "ray.h"
#include "hitable_list.h"
#include "sphere.h"
#include "camera.h"
#include "material.h"
#include <chrono>
#include <fstream>
#include <sstream>
#define num_hitables 488
using namespace std::chrono;
__device__ void random_scene(hitable **d_list) {
curandState local_rand_state;
curand_init((unsigned long long)clock64(), (unsigned long long)0, 0, &local_rand_state);
d_list[0] = new sphere(vec3(0,-1000.0,-1), 1000, new lambertian(vec3(0.5, 0.5, 0.5)));
int i = 1;
for(int a = -11; a < 11; a++) {
for(int b = -11; b < 11; b++) {
float choose_mat = curand_uniform(&local_rand_state);
vec3 center(a+curand_uniform(&local_rand_state),0.2,b+curand_uniform(&local_rand_state));
if(choose_mat < 0.8f)
d_list[i++] = new sphere(center, 0.2, new lambertian(vec3(curand_uniform(&local_rand_state)*curand_uniform(&local_rand_state),
curand_uniform(&local_rand_state)*curand_uniform(&local_rand_state),
curand_uniform(&local_rand_state)*curand_uniform(&local_rand_state))));
else if(choose_mat < 0.95f)
d_list[i++] = new sphere(center, 0.2, new metal(vec3(0.5f*(1.0f+curand_uniform(&local_rand_state)),
0.5f*(1.0f+curand_uniform(&local_rand_state)),
0.5f*(1.0f+curand_uniform(&local_rand_state))),
0.5f*curand_uniform(&local_rand_state)));
else d_list[i++] = new sphere(center, 0.2, new dielectric(1.5));
}
}
d_list[i++] = new sphere(vec3(0, 1,0), 1.0, new dielectric(1.5));
d_list[i++] = new sphere(vec3(-4, 1, 0), 1.0, new lambertian(vec3(0.4, 0.2, 0.1)));
d_list[i++] = new sphere(vec3(4, 1, 0), 1.0, new metal(vec3(0.7, 0.6, 0.5), 0.0));
}
__global__ void create_scene(hitable **d_list, hitable **d_world, camera **d_camera, int nx, int ny) {
if (threadIdx.x == 0 && blockIdx.x == 0) {
random_scene(d_list);
*d_world = new hitable_list(d_list, num_hitables);
vec3 lookfrom(13,2,3);
vec3 lookat(0,0,0);
*d_camera = new camera(lookfrom, lookat, vec3(0,1,0), 30.0, float(nx)/float(ny), 0.1, (lookfrom-lookat).length());
}
}
__global__ void free_scene(hitable **d_list, hitable **d_world, camera **d_camera) {
for(int i=0; i < 22*22+1+3; i++) delete d_list[i];
delete *d_world;
delete *d_camera;
}
__device__ vec3 color(const ray& r, hitable **world, curandState *rand_state) {
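// Iterative form of the usual recursive ray-colour routine: follow up to 50
// bounces, multiplying each hit's attenuation into cur_attenuation; return
// black if the ray is absorbed (or the bounce budget runs out), otherwise the
// background sky gradient scaled by the accumulated attenuation.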
ray cur_ray = r;
vec3 cur_attenuation = vec3(1.0,1.0,1.0);
for(int i = 0; i < 50; i++) {
hit_record rec;
if ((*world)->hit(cur_ray, 0.001f, FLT_MAX, rec)) {
ray scattered;
vec3 attenuation;
if(rec.mat_ptr->scatter(cur_ray, rec, attenuation, scattered, rand_state)) {
cur_attenuation *= attenuation;
cur_ray = scattered;
}
else {
return vec3(0.0,0.0,0.0);
}
}
else {
vec3 unit_direction = unit_vector(cur_ray.direction());
float t = 0.5f*(unit_direction.y() + 1.0f);
vec3 c = (1.0f-t)*vec3(1.0, 1.0, 1.0) + t*vec3(0.5, 0.7, 1.0);
return cur_attenuation * c;
}
}
return vec3(0.0,0.0,0.0);
}
__global__ void render(vec3 *img, int nx, int ny, int ns, hitable **world, camera **cam) {
int i = threadIdx.x + blockIdx.x * blockDim.x;
int j = threadIdx.y + blockIdx.y * blockDim.y;
if((i >= nx) || (j >= ny)) return;
int pixel_index = j * nx + i;
curandState state;
curand_init((unsigned long long)clock64() + pixel_index, (unsigned long long)0, 0, &state);
vec3 col(0,0,0);
for(int s=0; s<ns; s++){
float u = float(i + curand_uniform(&state)) / float(nx);
float v = float(j + curand_uniform(&state)) / float(ny);
ray r = (*cam)->get_ray(u, v, &state);
col += color(r, world, &state);
}
col /= float(ns);
col[0] = sqrt(col[0]);
col[1] = sqrt(col[1]);
col[2] = sqrt(col[2]);
img[pixel_index] = 255.99 * col;
}
int main() {
int nx, ny, ns;
int tx = 8;
int ty = 8;
std::cout << "nx: ";
std::cin >> nx;
std::cout << "ny: ";
std::cin >> ny;
std::cout << "ns: ";
std::cin >> ns;
dim3 blocks(nx/tx+1,ny/ty+1);
dim3 threads(tx,ty);
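// The +1 over-allocates up to one extra block per axis so the grid covers
// images whose sides are not multiples of tx/ty; e.g. nx=400, ny=225 with
// 8x8 threads gives a 51x29 grid (408x232 pixels), and the bounds check at
// the top of render() discards the excess threads.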
//Alloc Memory
vec3 *img;
cudaMallocManaged(&img, nx*ny*sizeof(vec3));
hitable **list, **world;
cudaMalloc(&list, num_hitables*sizeof(hitable *));
cudaMalloc(&world, sizeof(hitable *));
camera **cam;
cudaMalloc(&cam, sizeof(camera *));
auto start = high_resolution_clock::now();
//GPU Process
create_scene<<<1,1>>>(list, world, cam, nx, ny);
cudaDeviceSynchronize();
render<<<blocks, threads>>>(img, nx, ny, ns, world, cam);
cudaDeviceSynchronize();
//Generate image
std::cerr << "Rendering Image: " << nx << "x" << ny << std::endl;
std::ofstream myfile;
myfile.open("Image.ppm");
myfile << "P3\n" << nx << " " << ny << "\n255\n";
for (int j = ny-1; j >= 0; j--) {
for (int i = 0; i < nx; i++) {
size_t pixel_index = j*nx + i;
int ir = int(img[pixel_index].r());
int ig = int(img[pixel_index].g());
int ib = int(img[pixel_index].b());
myfile << ir << " " << ig << " " << ib << std::endl;
}
}
myfile.close();
auto end_time = duration_cast<duration<double>>(high_resolution_clock::now() - start).count();
std::cout << "Tempo de Execução: " << end_time << " segundos" << std::endl ;
//Free Components
cudaDeviceSynchronize();
free_scene<<<1,1>>>(list, world, cam);
void* freeList[4] = {cam, world, list, img};
for(int i=0; i<4; i++) cudaFree(freeList[i]);
cudaDeviceReset();
} |
333119535033c8692da0922c31feed9969c67377.hip | // !!! This is a file automatically generated by hipify!!!
#pragma once
#include "spokes.cu"
#include "defines.h"
#include <stdint.h>
//Everything in global memory
//****************************************************************************************************
__device__ __forceinline__ uint32_t NeighbourTriming_sh(const uint32_t VertexID, //Input: vertex id (start of the spoke)
const uint32_t tid, //Input: thread id
const real x_vertex, const real y_vertex, const real z_vertex, //Input: seed point
const real spoke3d_x_st, const real spoke3d_y_st, const real spoke3d_z_st, //Input: spoke start point
real&spoke3d_x_end, real&spoke3d_y_end, real&spoke3d_z_end, //Input/Output: spoke end point
real&trimmingN_x, real&trimmingN_y, real&trimmingN_z, //Output: last neighbour to trim the spoke
const uint32_t skip1, const uint32_t skip2, const uint32_t skip3, const uint32_t skip4,//Input: neighbour to skip
const uint32_t neighbour_count, //Input: number of my vertex neighbours
real3 sh_points[]){ //Input: all points
//loop over the neighbours of my vertex (neighbour_count) and trim the input spoke
//using Voronoi (bisector) planes,
//return the trimmed spoke and the neighbour that trimmed it the shortest
uint32_t trimming_neighbour = UINT32_MAX;
uint32_t base_tid = tid*MaxOffsets;
for(uint32_t i=1; i<= neighbour_count; i++){
uint32_t myNeighbour = base_tid + i;
if(myNeighbour == skip1 || myNeighbour == skip2 || myNeighbour == skip3 || myNeighbour == skip4){ continue; }
real x_neighbour(sh_points[myNeighbour].x),
y_neighbour(sh_points[myNeighbour].y),
z_neighbour(sh_points[myNeighbour].z);
real mid_x = (x_neighbour + x_vertex)/2;//point on the trimming plane
real mid_y = (y_neighbour + y_vertex)/2;
real mid_z = (z_neighbour + z_vertex)/2;
real norm_p_x = x_neighbour - x_vertex;//normal to the plane
real norm_p_y = y_neighbour - y_vertex;
real norm_p_z = z_neighbour - z_vertex;
if(SpokePlaneTrimming(mid_x, mid_y, mid_z, norm_p_x, norm_p_y, norm_p_z, //trimming plane
spoke3d_x_st, spoke3d_y_st, spoke3d_z_st, spoke3d_x_end, spoke3d_y_end, spoke3d_z_end) //spoke
){
trimming_neighbour = myNeighbour;
trimmingN_x = x_neighbour;
trimmingN_y = y_neighbour;
trimmingN_z = z_neighbour;
}
}
return trimming_neighbour;
}
//****************************************************************************************************
__device__ __forceinline__ uint32_t ThreeDSpoking_sh(const uint32_t VertexID, //Input: vertex id (start of the spoke)
const uint32_t tid, //Input: thread id
const real x_vertex, const real y_vertex, const real z_vertex, //Input: seed point
const real spoke3d_x_st, const real spoke3d_y_st, const real spoke3d_z_st, //Input: spoke starting point
real&spoke3d_x_end, real&spoke3d_y_end, real&spoke3d_z_end, //Output: spoke end point after trimming
const uint32_t neighbour_count,//Input: neighbour of the vertex neighbours
real&grandparent_x, real&grandparent_y, real&grandparent_z, //Output: the grandparent neighbour coordinates
real3 sh_points[], //Input: all points
hiprandState_t* globalState, int randID){//Input: global state for rand generate
//Shoot and trim a 3D spoke
//Return the last neighbour vertex that trimmed the spoke and the end point of the spoke
//grandparent is the neighbour that will trim the spoke the shortest
//We use the spoke end as a proxy for direction here and then set the end point correctly after that
uint32_t grandparent = UINT32_MAX;
while(grandparent == UINT32_MAX){
RandSpoke3D(spoke3d_x_st, spoke3d_y_st, spoke3d_z_st, spoke3d_x_end, spoke3d_y_end, spoke3d_z_end, globalState, randID);
spoke3d_x_end = spoke3d_x_st + 1000*spoke3d_x_end;
spoke3d_y_end = spoke3d_y_st + 1000*spoke3d_y_end;
spoke3d_z_end = spoke3d_z_st + 1000*spoke3d_z_end;
//printf("\n 1) spoke3d_end( %f,%f,%f )", spoke3d_x_end, spoke3d_y_end, spoke3d_z_end);
grandparent = NeighbourTriming_sh(VertexID,
tid,
x_vertex, y_vertex, z_vertex,
spoke3d_x_st, spoke3d_y_st, spoke3d_z_st,
spoke3d_x_end, spoke3d_y_end, spoke3d_z_end,
grandparent_x, grandparent_y, grandparent_z,
500, 500, 500, 500,
neighbour_count, sh_points);
//if(grandparent == UINT32_MAX){printf(" Invalid grand\n");}
}
/*printf("\n 2) spoke3d_end( %f,%f,%f )", spoke3d_x_end, spoke3d_y_end, spoke3d_z_end);
printf("\n grandparent( %f,%f,%f )", grandparent_x, grandparent_y, grandparent_z);
printf("\n 3dspoke_plan( %f,%f,%f, %f,%f,%f )\n", grandparent_x - x_vertex, grandparent_y- y_vertex , grandparent_z- z_vertex,
(grandparent_x + x_vertex)/2.0, (grandparent_y + y_vertex)/2.0 , (grandparent_z + z_vertex)/2.0);*/
return grandparent;
}
//****************************************************************************************************
__device__ __forceinline__ uint32_t TwoDSpoking_sh(const uint32_t VertexID, //Input: vertex id (start of the spoke)
const uint32_t tid, //Input: thread id
const real x_vertex, const real y_vertex, const real z_vertex, //Input: seed point
const real spoke2d_x_st, const real spoke2d_y_st, const real spoke2d_z_st, //Input: spoke starting point
real&spoke2d_x_end, real&spoke2d_y_end, real&spoke2d_z_end, //Output: spoke end point after trimming
const uint32_t neighbour_count,//Input: neighbour of the vertex neighbours
real&parent_x, real&parent_y, real&parent_z, //Output: the parent neighbour coordinates
const uint32_t grandparent, //Input: the neighbour with whom the spoke lives on its voronoi facet
const real grandparent_x, const real grandparent_y, const real grandparent_z,//Input: grandparent neighbour coordinates
real3 sh_points[], //Input: all points
hiprandState_t* globalState, int randID){//Input: global state for rand generate
//Shoot and trim a 2D spoke
//Return the last neighbour vertex that trimmed the spoke and the end point of the spoke
real norm_p_x = grandparent_x - x_vertex;//normal to the plane
real norm_p_y = grandparent_y - y_vertex;
real norm_p_z = grandparent_z - z_vertex;
NormalizeVector(norm_p_x, norm_p_y, norm_p_z);
uint32_t parent = UINT32_MAX;
while (parent == UINT32_MAX){
//We use the spoke end as a proxy for direction here and then set the end point correctly after that
RandSpoke2D(spoke2d_x_st, spoke2d_y_st, spoke2d_z_st, //2D spoke starting point
norm_p_x, norm_p_y, norm_p_z, //normal to the plane
spoke2d_x_end, spoke2d_y_end, spoke2d_z_end, //2d spoke direction
globalState, randID);
spoke2d_x_end = spoke2d_x_st + 1000*spoke2d_x_end;
spoke2d_y_end = spoke2d_y_st + 1000*spoke2d_y_end;
spoke2d_z_end = spoke2d_z_st + 1000*spoke2d_z_end;
//printf("\n 1) spoke2d_end( %f,%f,%f )", spoke2d_x_end, spoke2d_y_end, spoke2d_z_end);
parent = NeighbourTriming_sh(VertexID,
tid,
x_vertex, y_vertex, z_vertex,
spoke2d_x_st, spoke2d_y_st, spoke2d_z_st,
spoke2d_x_end, spoke2d_y_end, spoke2d_z_end,
parent_x, parent_y, parent_z,
500, grandparent, 500, 500,
neighbour_count, sh_points);
//if(parent == UINT32_MAX){printf(" Invalid parent\n");}
}
/*printf("\n 2) spoke2d_end( %f,%f,%f )", spoke2d_x_end, spoke2d_y_end, spoke2d_z_end);
printf("\n parent( %f,%f,%f )", parent_x, parent_y, parent_z);
printf("\n 2dspoke_plan( %f,%f,%f, %f,%f,%f )\n", parent_x - x_vertex, parent_y- y_vertex , parent_z- z_vertex,
(parent_x + x_vertex)/2.0, (parent_y + y_vertex)/2.0 , (parent_z + z_vertex)/2.0);*/
return parent;
}
//****************************************************************************************************
__device__ __forceinline__ uint32_t OneDSpoking_sh(const uint32_t VertexID, //Input: vertex id (start of the spoke)
const uint32_t tid, //Input: thread id
const real x_vertex, const real y_vertex, const real z_vertex, //Input: seed point
const real spoke1d_x_st, const real spoke1d_y_st, const real spoke1d_z_st, //Input: spoke starting point
real&spoke1d_x_end, real&spoke1d_y_end, real&spoke1d_z_end, //Output: spoke end point after trimming
const uint32_t neighbour_count,//Input: neighbour of the vertex neighbours
const uint32_t grandparent, //Input: the neighbour with whom the spoke lives on its voronoi facet
const real grandparent_x, const real grandparent_y, const real grandparent_z,//Input: grandparent neighbour coordinates
const uint32_t parent, //Input: the other neighbour with whom the spoke lives on another voronoi facet
const real parent_x, const real parent_y, const real parent_z,//Input: parent neighbour coordinates
real3 sh_points[], //Input: all points
hiprandState_t* globalState, int randID){//Input: global state for rand generate
//Shoot and trim a 1D spoke
//Return the last neighbour vertex that trimmed the spoke and the end point of the spoke
real norm_p1_x = grandparent_x - x_vertex;//normal to the plane 1
real norm_p1_y = grandparent_y - y_vertex;
real norm_p1_z = grandparent_z - z_vertex;
NormalizeVector(norm_p1_x,norm_p1_y,norm_p1_z);
real norm_p2_x = parent_x - x_vertex;//normal to the plane 2
real norm_p2_y = parent_y - y_vertex;
real norm_p2_z = parent_z - z_vertex;
NormalizeVector(norm_p2_x,norm_p2_y,norm_p2_z);
uint32_t child = UINT32_MAX;
real child_x, child_y, child_z;
while(child == UINT32_MAX){
RandSpoke1D(spoke1d_x_st, spoke1d_y_st, spoke1d_z_st,
norm_p1_x, norm_p1_y, norm_p1_z,
norm_p2_x, norm_p2_y, norm_p2_z,
spoke1d_x_end, spoke1d_y_end, spoke1d_z_end,
globalState, randID);
spoke1d_x_end = spoke1d_x_st + 1000*spoke1d_x_end;
spoke1d_y_end = spoke1d_y_st + 1000*spoke1d_y_end;
spoke1d_z_end = spoke1d_z_st + 1000*spoke1d_z_end;
child = NeighbourTriming_sh(VertexID,
tid,
x_vertex, y_vertex, z_vertex,
spoke1d_x_st, spoke1d_y_st, spoke1d_z_st,
spoke1d_x_end, spoke1d_y_end, spoke1d_z_end,
child_x, child_y, child_z,
500, grandparent, parent, 500,
neighbour_count, sh_points);
//if(child == UINT32_MAX){printf(" Invalid child\n");}
}
/*printf("\n 2) spoke1d_end( %f,%f,%f )", spoke1d_x_end, spoke1d_y_end, spoke1d_z_end);
printf("\n child( %f,%f,%f )", child_x, child_y, child_z);
printf("\n 1dspoke_plan( %f,%f,%f, %f,%f,%f )\n", child_x - x_vertex, child_y- y_vertex , child_z- z_vertex,
(child_x + x_vertex)/2.0, (child_y + y_vertex)/2.0 , (child_z + z_vertex)/2.0);*/
return child;
}
//****************************************************************************************************
__device__ __forceinline__ void explore_sh(const uint32_t vertexID, //Input: vertex to explore
const uint32_t tid, //Input: thread id
const real x_vertex,const real y_vertex,const real z_vertex,//Input: vertex coords
real3 sh_points[], //Input: all points
const uint32_t neighbour_count, //Input: num neighbours
hiprandState_t* globalState, int randID, //Input: global state for rand generate
uint3&exploredID, //Output: the id of three samples connected to vertexID
real3&sharedVertex){ //Output: shared voronoi vertex between exploredID
//printf("\n ID=%i, myVertex( %f,%f,%f )\n",vertexID, x_vertex, y_vertex, z_vertex);
real grandparent_x, grandparent_y, grandparent_z, parent_x, parent_y, parent_z;
//Shoot and trim a 3D spoke with all neighbours and keep a record for the last trimming
//neighbour -> grandparent neighbour
real spoke3d_x_end, spoke3d_y_end, spoke3d_z_end;
uint32_t grandparent = ThreeDSpoking_sh(vertexID,
tid,
x_vertex, y_vertex, z_vertex,
x_vertex, y_vertex, z_vertex,
spoke3d_x_end, spoke3d_y_end, spoke3d_z_end,
neighbour_count,
grandparent_x, grandparent_y, grandparent_z,
sh_points,
globalState, randID);
//Shoot a 2D spoke from the intersection point
//Trim the 2D spoke with all the neighbours (except the grandparent neighbour)
//and keep a record for the last trimming neighbour -> parent neighbour
real spoke2d_x_end, spoke2d_y_end, spoke2d_z_end;
uint32_t parent = TwoDSpoking_sh(vertexID,
tid,
x_vertex, y_vertex, z_vertex,
spoke3d_x_end, spoke3d_y_end, spoke3d_z_end,
spoke2d_x_end, spoke2d_y_end, spoke2d_z_end,
neighbour_count,
parent_x, parent_y, parent_z,
grandparent,
grandparent_x, grandparent_y, grandparent_z,
sh_points,
globalState, randID);
//Shoot a 1D spoke
//Trim the 1D spoke with all the neighbours (except the grandparent and parent neighbours)
//and keep a record for the last trimming neighbour -> child neighbour
real spoke1d_x_end, spoke1d_y_end, spoke1d_z_end;
uint32_t child = OneDSpoking_sh(vertexID,
tid,
x_vertex, y_vertex, z_vertex,
spoke2d_x_end, spoke2d_y_end, spoke2d_z_end,
spoke1d_x_end, spoke1d_y_end, spoke1d_z_end,
neighbour_count,
grandparent,
grandparent_x, grandparent_y, grandparent_z,
parent,
parent_x, parent_y, parent_z,
sh_points,
globalState, randID);
//Return the grandparent, parent and child as exploredID
//Return the end point of the 1D spoke as sharedVertex
//printf("\n grandparent= %i,parent= %i, child= %i,\n",grandparent, parent, child);
exploredID.x = grandparent;
exploredID.y = parent;
exploredID.z = child;
sharedVertex.x = spoke1d_x_end;
sharedVertex.y = spoke1d_y_end;
sharedVertex.z = spoke1d_z_end;
} | 333119535033c8692da0922c31feed9969c67377.cu | #pragma once
#include "spokes.cu"
#include "defines.h"
#include <stdint.h>
//Everything in global memory
//****************************************************************************************************
__device__ __forceinline__ uint32_t NeighbourTriming_sh(const uint32_t VertexID, //Input: vertex id (start of the spoke)
const uint32_t tid, //Input: thread id
const real x_vertex, const real y_vertex, const real z_vertex, //Input: seed point
const real spoke3d_x_st, const real spoke3d_y_st, const real spoke3d_z_st, //Input: spoke start point
real&spoke3d_x_end, real&spoke3d_y_end, real&spoke3d_z_end, //Input/Output: spoke end point
real&trimmingN_x, real&trimmingN_y, real&trimmingN_z, //Output: last neighbour to trim the spoke
const uint32_t skip1, const uint32_t skip2, const uint32_t skip3, const uint32_t skip4,//Input: neighbour to skip
const uint32_t neighbour_count, //Input: number of my vertex neighbours
real3 sh_points[]){ //Input: all points
//loop over the neighbours of my vertex (neighbour_count) and trim the input spoke
//using Voronoi (bisector) planes (a sketch of such a plane clip follows this function),
//return the trimmed spoke and the neighbour that trimmed it the shortest
uint32_t trimming_neighbour = UINT32_MAX;
uint32_t base_tid = tid*MaxOffsets;
for(uint32_t i=1; i<= neighbour_count; i++){
uint32_t myNeighbour = base_tid + i;
if(myNeighbour == skip1 || myNeighbour == skip2 || myNeighbour == skip3 || myNeighbour == skip4){ continue; }
real x_neighbour(sh_points[myNeighbour].x),
y_neighbour(sh_points[myNeighbour].y),
z_neighbour(sh_points[myNeighbour].z);
real mid_x = (x_neighbour + x_vertex)/2;//point on the trimming plane
real mid_y = (y_neighbour + y_vertex)/2;
real mid_z = (z_neighbour + z_vertex)/2;
real norm_p_x = x_neighbour - x_vertex;//normal to the plane
real norm_p_y = y_neighbour - y_vertex;
real norm_p_z = z_neighbour - z_vertex;
if(SpokePlaneTrimming(mid_x, mid_y, mid_z, norm_p_x, norm_p_y, norm_p_z, //trimming plane
spoke3d_x_st, spoke3d_y_st, spoke3d_z_st, spoke3d_x_end, spoke3d_y_end, spoke3d_z_end) //spoke
){
trimming_neighbour = myNeighbour;
trimmingN_x = x_neighbour;
trimmingN_y = y_neighbour;
trimmingN_z = z_neighbour;
}
}
return trimming_neighbour;
}
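//----------------------------------------------------------------------------
//Added illustrative sketch (not the project's implementation, which lives in
//spokes.cu): one way a bisector-plane clip like SpokePlaneTrimming could work.
//The plane passes through m (midpoint of the two seeds) with normal n
//(seed -> neighbour). If the current spoke end lies on the neighbour's side,
//it is pulled back to the plane and true is returned to signal a trim. It
//assumes the spoke start is on the seed's side, which holds for the calls above.
__device__ __forceinline__ bool SpokePlaneClip_sketch(
 const real m_x, const real m_y, const real m_z, //point on the plane
 const real n_x, const real n_y, const real n_z, //plane normal
 const real st_x, const real st_y, const real st_z, //spoke start
 real&end_x, real&end_y, real&end_z){ //spoke end (clipped in place)
 real side_end = (end_x - m_x)*n_x + (end_y - m_y)*n_y + (end_z - m_z)*n_z;
 if(side_end <= 0){ return false; } //end already on the seed's side, nothing to trim
 real denom = (end_x - st_x)*n_x + (end_y - st_y)*n_y + (end_z - st_z)*n_z;
 real t = ((m_x - st_x)*n_x + (m_y - st_y)*n_y + (m_z - st_z)*n_z) / denom;
 end_x = st_x + t*(end_x - st_x);
 end_y = st_y + t*(end_y - st_y);
 end_z = st_z + t*(end_z - st_z);
 return true;
}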
//****************************************************************************************************
__device__ __forceinline__ uint32_t ThreeDSpoking_sh(const uint32_t VertexID, //Input: vertex id (start of the spoke)
const uint32_t tid, //Input: thread id
const real x_vertex, const real y_vertex, const real z_vertex, //Input: seed point
const real spoke3d_x_st, const real spoke3d_y_st, const real spoke3d_z_st, //Input: spoke starting point
real&spoke3d_x_end, real&spoke3d_y_end, real&spoke3d_z_end, //Output: spoke end point after trimming
const uint32_t neighbour_count,//Input: neighbour of the vertex neighbours
real&grandparent_x, real&grandparent_y, real&grandparent_z, //Output: the grandparent neighbour coordinates
real3 sh_points[], //Input: all points
curandState* globalState, int randID){//Input: global state for rand generate
//Shoot and trim a 3D spoke
//Return the last neighbour vertex that trimmed the spoke and the end point of the spoke
//grandparent is the neighbour that will trim the spoke the shortest
//We use the spoke end as a proxy for direction here and then set the end point correctly after that
uint32_t grandparent = UINT32_MAX;
while(grandparent == UINT32_MAX){
RandSpoke3D(spoke3d_x_st, spoke3d_y_st, spoke3d_z_st, spoke3d_x_end, spoke3d_y_end, spoke3d_z_end, globalState, randID);
spoke3d_x_end = spoke3d_x_st + 1000*spoke3d_x_end;
spoke3d_y_end = spoke3d_y_st + 1000*spoke3d_y_end;
spoke3d_z_end = spoke3d_z_st + 1000*spoke3d_z_end;
//printf("\n 1) spoke3d_end( %f,%f,%f )", spoke3d_x_end, spoke3d_y_end, spoke3d_z_end);
grandparent = NeighbourTriming_sh(VertexID,
tid,
x_vertex, y_vertex, z_vertex,
spoke3d_x_st, spoke3d_y_st, spoke3d_z_st,
spoke3d_x_end, spoke3d_y_end, spoke3d_z_end,
grandparent_x, grandparent_y, grandparent_z,
500, 500, 500, 500,
neighbour_count, sh_points);
//if(grandparent == UINT32_MAX){printf(" Invalid grand\n");}
}
/*printf("\n 2) spoke3d_end( %f,%f,%f )", spoke3d_x_end, spoke3d_y_end, spoke3d_z_end);
printf("\n grandparent( %f,%f,%f )", grandparent_x, grandparent_y, grandparent_z);
printf("\n 3dspoke_plan( %f,%f,%f, %f,%f,%f )\n", grandparent_x - x_vertex, grandparent_y- y_vertex , grandparent_z- z_vertex,
(grandparent_x + x_vertex)/2.0, (grandparent_y + y_vertex)/2.0 , (grandparent_z + z_vertex)/2.0);*/
return grandparent;
}
//****************************************************************************************************
__device__ __forceinline__ uint32_t TwoDSpoking_sh(const uint32_t VertexID, //Input: vertex id (start of the spoke)
const uint32_t tid, //Input: thread id
const real x_vertex, const real y_vertex, const real z_vertex, //Input: seed point
const real spoke2d_x_st, const real spoke2d_y_st, const real spoke2d_z_st, //Input: spoke starting point
real&spoke2d_x_end, real&spoke2d_y_end, real&spoke2d_z_end, //Output: spoke end point after trimming
const uint32_t neighbour_count,//Input: neighbour of the vertex neighbours
real&parent_x, real&parent_y, real&parent_z, //Output: the parent neighbour coordinates
const uint32_t grandparent, //Input: the neighbour with whom the spoke lives on its voronoi facet
const real grandparent_x, const real grandparent_y, const real grandparent_z,//Input: grandparent neighbour coordinates
real3 sh_points[], //Input: all points
curandState* globalState, int randID){//Input: global state for rand generate
//Shoot and trim a 2D spoke
//Return the last neighbour vertex that trimmed the spoke and the end point of the spoke
real norm_p_x = grandparent_x - x_vertex;//normal to the plane
real norm_p_y = grandparent_y - y_vertex;
real norm_p_z = grandparent_z - z_vertex;
NormalizeVector(norm_p_x, norm_p_y, norm_p_z);
uint32_t parent = UINT32_MAX;
while (parent == UINT32_MAX){
//We use the spoke end as a proxy for direction here and then set the end point correctly after that
RandSpoke2D(spoke2d_x_st, spoke2d_y_st, spoke2d_z_st, //2D spoke starting point
norm_p_x, norm_p_y, norm_p_z, //normal to the plane
spoke2d_x_end, spoke2d_y_end, spoke2d_z_end, //2d spoke direction
globalState, randID);
spoke2d_x_end = spoke2d_x_st + 1000*spoke2d_x_end;
spoke2d_y_end = spoke2d_y_st + 1000*spoke2d_y_end;
spoke2d_z_end = spoke2d_z_st + 1000*spoke2d_z_end;
//printf("\n 1) spoke2d_end( %f,%f,%f )", spoke2d_x_end, spoke2d_y_end, spoke2d_z_end);
parent = NeighbourTriming_sh(VertexID,
tid,
x_vertex, y_vertex, z_vertex,
spoke2d_x_st, spoke2d_y_st, spoke2d_z_st,
spoke2d_x_end, spoke2d_y_end, spoke2d_z_end,
parent_x, parent_y, parent_z,
500, grandparent, 500, 500,
neighbour_count, sh_points);
//if(parent == UINT32_MAX){printf(" Invalid parent\n");}
}
/*printf("\n 2) spoke2d_end( %f,%f,%f )", spoke2d_x_end, spoke2d_y_end, spoke2d_z_end);
printf("\n parent( %f,%f,%f )", parent_x, parent_y, parent_z);
printf("\n 2dspoke_plan( %f,%f,%f, %f,%f,%f )\n", parent_x - x_vertex, parent_y- y_vertex , parent_z- z_vertex,
(parent_x + x_vertex)/2.0, (parent_y + y_vertex)/2.0 , (parent_z + z_vertex)/2.0);*/
return parent;
}
//****************************************************************************************************
__device__ __forceinline__ uint32_t OneDSpoking_sh(const uint32_t VertexID, //Input: vertex id (start of the spoke)
const uint32_t tid, //Input: thread id
const real x_vertex, const real y_vertex, const real z_vertex, //Input: seed point
const real spoke1d_x_st, const real spoke1d_y_st, const real spoke1d_z_st, //Input: spoke starting point
real&spoke1d_x_end, real&spoke1d_y_end, real&spoke1d_z_end, //Output: spoke end point after trimming
const uint32_t neighbour_count,//Input: neighbour of the vertex neighbours
const uint32_t grandparent, //Input: the neighbour with whom the spoke lives on its voronoi facet
const real grandparent_x, const real grandparent_y, const real grandparent_z,//Input: grandparent neighbour coordinates
const uint32_t parent, //Input: the other neighbour with whom the spoke lives on another voronoi facet
const real parent_x, const real parent_y, const real parent_z,//Input: parent neighbour coordinates
real3 sh_points[], //Input: all points
curandState* globalState, int randID){//Input: global state for rand generate
//Shoot and trim a 1D spoke
//Return the last neighbour vertex that trimmed the spoke and the end point of the spoke
real norm_p1_x = grandparent_x - x_vertex;//normal to the plane 1
real norm_p1_y = grandparent_y - y_vertex;
real norm_p1_z = grandparent_z - z_vertex;
NormalizeVector(norm_p1_x,norm_p1_y,norm_p1_z);
real norm_p2_x = parent_x - x_vertex;//normal to the plane 2
real norm_p2_y = parent_y - y_vertex;
real norm_p2_z = parent_z - z_vertex;
NormalizeVector(norm_p2_x,norm_p2_y,norm_p2_z);
uint32_t child = UINT32_MAX;
real child_x, child_y, child_z;
while(child == UINT32_MAX){
RandSpoke1D(spoke1d_x_st, spoke1d_y_st, spoke1d_z_st,
norm_p1_x, norm_p1_y, norm_p1_z,
norm_p2_x, norm_p2_y, norm_p2_z,
spoke1d_x_end, spoke1d_y_end, spoke1d_z_end,
globalState, randID);
spoke1d_x_end = spoke1d_x_st + 1000*spoke1d_x_end;
spoke1d_y_end = spoke1d_y_st + 1000*spoke1d_y_end;
spoke1d_z_end = spoke1d_z_st + 1000*spoke1d_z_end;
child = NeighbourTriming_sh(VertexID,
tid,
x_vertex, y_vertex, z_vertex,
spoke1d_x_st, spoke1d_y_st, spoke1d_z_st,
spoke1d_x_end, spoke1d_y_end, spoke1d_z_end,
child_x, child_y, child_z,
500, grandparent, parent, 500,
neighbour_count, sh_points);
//if(child == UINT32_MAX){printf(" Invalid child\n");}
}
/*printf("\n 2) spoke1d_end( %f,%f,%f )", spoke1d_x_end, spoke1d_y_end, spoke1d_z_end);
printf("\n child( %f,%f,%f )", child_x, child_y, child_z);
printf("\n 1dspoke_plan( %f,%f,%f, %f,%f,%f )\n", child_x - x_vertex, child_y- y_vertex , child_z- z_vertex,
(child_x + x_vertex)/2.0, (child_y + y_vertex)/2.0 , (child_z + z_vertex)/2.0);*/
return child;
}
//****************************************************************************************************
__device__ __forceinline__ void explore_sh(const uint32_t vertexID, //Input: vertex to explore
const uint32_t tid, //Input: thread id
const real x_vertex,const real y_vertex,const real z_vertex,//Input: vertex coords
real3 sh_points[], //Input: all points
const uint32_t neighbour_count, //Input: num neighbours
curandState* globalState, int randID, //Input: global state for rand generate
uint3&exploredID, //Output: the id of three samples connected to vertexID
real3&sharedVertex){ //Output: shared voronoi vertex between exploredID
//printf("\n ID=%i, myVertex( %f,%f,%f )\n",vertexID, x_vertex, y_vertex, z_vertex);
real grandparent_x, grandparent_y, grandparent_z, parent_x, parent_y, parent_z;
//Shoot and trim a 3D spoke with all neighbours and keep a record for the last trimming
//neighbour -> grandparent neighbour
real spoke3d_x_end, spoke3d_y_end, spoke3d_z_end;
uint32_t grandparent = ThreeDSpoking_sh(vertexID,
tid,
x_vertex, y_vertex, z_vertex,
x_vertex, y_vertex, z_vertex,
spoke3d_x_end, spoke3d_y_end, spoke3d_z_end,
neighbour_count,
grandparent_x, grandparent_y, grandparent_z,
sh_points,
globalState, randID);
//Shoot a 2D spoke from the intersection point
//Trim the 2D spoke with all the neighbours (except the grandparent neighbour)
//and keep a record for the last trimming neighbour -> parent neighbour
real spoke2d_x_end, spoke2d_y_end, spoke2d_z_end;
uint32_t parent = TwoDSpoking_sh(vertexID,
tid,
x_vertex, y_vertex, z_vertex,
spoke3d_x_end, spoke3d_y_end, spoke3d_z_end,
spoke2d_x_end, spoke2d_y_end, spoke2d_z_end,
neighbour_count,
parent_x, parent_y, parent_z,
grandparent,
grandparent_x, grandparent_y, grandparent_z,
sh_points,
globalState, randID);
//Shoot a 1D spoke
//Trim the 1D spoke with all the neighbours (except the grandparent and parent neighbours)
//and keep a record for the last trimming neighbour -> child neighbour
real spoke1d_x_end, spoke1d_y_end, spoke1d_z_end;
uint32_t child = OneDSpoking_sh(vertexID,
tid,
x_vertex, y_vertex, z_vertex,
spoke2d_x_end, spoke2d_y_end, spoke2d_z_end,
spoke1d_x_end, spoke1d_y_end, spoke1d_z_end,
neighbour_count,
grandparent,
grandparent_x, grandparent_y, grandparent_z,
parent,
parent_x, parent_y, parent_z,
sh_points,
globalState, randID);
//Return the grandparent, parent and child as exploredID
//Return the end point of the 1D spoke as sharedVertex
//printf("\n grandparent= %i,parent= %i, child= %i,\n",grandparent, parent, child);
exploredID.x = grandparent;
exploredID.y = parent;
exploredID.z = child;
sharedVertex.x = spoke1d_x_end;
sharedVertex.y = spoke1d_y_end;
sharedVertex.z = spoke1d_z_end;
} |
bd7f86cbb932738271eb597f6878ce3dc4344ec5.hip | // !!! This is a file automatically generated by hipify!!!
/*
CUDA BarnesHut v2.1: Simulation of the gravitational forces
in a galactic cluster using the Barnes-Hut n-body algorithm
Copyright (c) 2011, Texas State University-San Marcos. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Texas State University-San Marcos nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <hip/hip_runtime.h>
// thread count
#define THREADS1 512 /* must be a power of 2 */
#define THREADS2 1024
#define THREADS3 1024
#define THREADS4 256
#define THREADS5 256
#define THREADS6 512
// block count = factor * #SMs
#define FACTOR1 3
#define FACTOR2 1
#define FACTOR3 1 /* must all be resident at the same time */
#define FACTOR4 1 /* must all be resident at the same time */
#define FACTOR5 5
#define FACTOR6 3
#define WARPSIZE 32
#define MAXDEPTH 32
/******************************************************************************/
// childd is aliased with velxd, velyd, velzd, accxd, accyd, acczd, and sortd but they never use the same memory locations
__constant__ int nnodesd, nbodiesd;
__constant__ float dtimed, dthfd, epssqd, itolsqd;
__constant__ volatile float *massd, *posxd, *posyd, *poszd, *velxd, *velyd, *velzd, *accxd, *accyd, *acczd;
__constant__ volatile float *maxxd, *maxyd, *maxzd, *minxd, *minyd, *minzd;
__constant__ volatile int *errd, *sortd, *childd, *countd, *startd;
__device__ volatile int stepd, bottomd, maxdepthd, blkcntd;
__device__ volatile float radiusd;
/******************************************************************************/
/*** initialize memory ********************************************************/
/******************************************************************************/
__global__ void InitializationKernel()
{
*errd = 0;
stepd = -1;
maxdepthd = 1;
blkcntd = 0;
}
/******************************************************************************/
/*** compute center and radius ************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS1, FACTOR1)
void BoundingBoxKernel()
{
register int i, j, k, inc;
register float val, minx, maxx, miny, maxy, minz, maxz;
__shared__ volatile float sminx[THREADS1], smaxx[THREADS1], sminy[THREADS1], smaxy[THREADS1], sminz[THREADS1], smaxz[THREADS1];
// initialize with valid data (in case #bodies < #threads)
minx = maxx = posxd[0];
miny = maxy = posyd[0];
minz = maxz = poszd[0];
// scan all bodies
i = threadIdx.x;
inc = THREADS1 * gridDim.x;
for (j = i + blockIdx.x * THREADS1; j < nbodiesd; j += inc) {
val = posxd[j];
minx = min(minx, val);
maxx = max(maxx, val);
val = posyd[j];
miny = min(miny, val);
maxy = max(maxy, val);
val = poszd[j];
minz = min(minz, val);
maxz = max(maxz, val);
}
// reduction in shared memory
sminx[i] = minx;
smaxx[i] = maxx;
sminy[i] = miny;
smaxy[i] = maxy;
sminz[i] = minz;
smaxz[i] = maxz;
for (j = THREADS1 / 2; j > 0; j /= 2) {
__syncthreads();
if (i < j) {
k = i + j;
sminx[i] = minx = min(minx, sminx[k]);
smaxx[i] = maxx = max(maxx, smaxx[k]);
sminy[i] = miny = min(miny, sminy[k]);
smaxy[i] = maxy = max(maxy, smaxy[k]);
sminz[i] = minz = min(minz, sminz[k]);
smaxz[i] = maxz = max(maxz, smaxz[k]);
}
}
// write block result to global memory
if (i == 0) {
k = blockIdx.x;
minxd[k] = minx;
maxxd[k] = maxx;
minyd[k] = miny;
maxyd[k] = maxy;
minzd[k] = minz;
maxzd[k] = maxz;
inc = gridDim.x - 1;
if (inc == atomicInc((unsigned int *)&blkcntd, inc)) {
// I'm the last block, so combine all block results
for (j = 0; j <= inc; j++) {
minx = min(minx, minxd[j]);
maxx = max(maxx, maxxd[j]);
miny = min(miny, minyd[j]);
maxy = max(maxy, maxyd[j]);
minz = min(minz, minzd[j]);
maxz = max(maxz, maxzd[j]);
}
// compute 'radius'
val = max(maxx - minx, maxy - miny);
radiusd = max(val, maxz - minz) * 0.5f;
// create root node
k = nnodesd;
bottomd = k;
massd[k] = -1.0f;
startd[k] = 0;
posxd[k] = (minx + maxx) * 0.5f;
posyd[k] = (miny + maxy) * 0.5f;
poszd[k] = (minz + maxz) * 0.5f;
k *= 8;
for (i = 0; i < 8; i++) childd[k + i] = -1;
stepd++;
}
}
}
/******************************************************************************/
/*** build tree ***************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS2, FACTOR2)
void TreeBuildingKernel()
{
register int i, j, k, depth, localmaxdepth, skip, inc;
register float x, y, z, r;
register float px, py, pz;
register int ch, n, cell, locked, patch;
register float radius, rootx, rooty, rootz;
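// child slot encoding used below: a value >= nbodiesd is the index of a cell,
// 0..nbodiesd-1 is the index of a body, -1 marks an empty slot, and -2 marks a
// slot that is locked while another thread inserts into it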
// cache root data
radius = radiusd;
rootx = posxd[nnodesd];
rooty = posyd[nnodesd];
rootz = poszd[nnodesd];
localmaxdepth = 1;
skip = 1;
inc = blockDim.x * gridDim.x;
i = threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all bodies assigned to thread
while (i < nbodiesd) {
if (skip != 0) {
// new body, so start traversing at root
skip = 0;
px = posxd[i];
py = posyd[i];
pz = poszd[i];
n = nnodesd;
depth = 1;
r = radius;
j = 0;
// determine which child to follow
if (rootx < px) j = 1;
if (rooty < py) j += 2;
if (rootz < pz) j += 4;
}
// follow path to leaf cell
ch = childd[n*8+j];
while (ch >= nbodiesd) {
n = ch;
depth++;
r *= 0.5f;
j = 0;
// determine which child to follow
if (posxd[n] < px) j = 1;
if (posyd[n] < py) j += 2;
if (poszd[n] < pz) j += 4;
ch = childd[n*8+j];
}
if (ch != -2) { // skip if child pointer is locked and try again later
locked = n*8+j;
if (ch == atomicCAS((int *)&childd[locked], ch, -2)) { // try to lock
if (ch == -1) {
// if null, just insert the new body
childd[locked] = i;
} else { // there already is a body in this position
patch = -1;
// create new cell(s) and insert the old and new body
do {
depth++;
cell = atomicSub((int *)&bottomd, 1) - 1;
if (cell <= nbodiesd) {
*errd = 1;
bottomd = nnodesd;
}
patch = max(patch, cell);
x = (j & 1) * r;
y = ((j >> 1) & 1) * r;
z = ((j >> 2) & 1) * r;
r *= 0.5f;
massd[cell] = -1.0f;
startd[cell] = -1;
x = posxd[cell] = posxd[n] - r + x;
y = posyd[cell] = posyd[n] - r + y;
z = poszd[cell] = poszd[n] - r + z;
for (k = 0; k < 8; k++) childd[cell*8+k] = -1;
if (patch != cell) {
childd[n*8+j] = cell;
}
j = 0;
if (x < posxd[ch]) j = 1;
if (y < posyd[ch]) j += 2;
if (z < poszd[ch]) j += 4;
childd[cell*8+j] = ch;
n = cell;
j = 0;
if (x < px) j = 1;
if (y < py) j += 2;
if (z < pz) j += 4;
ch = childd[n*8+j];
// repeat until the two bodies are different children
} while (ch >= 0);
childd[n*8+j] = i;
__threadfence(); // push out subtree
childd[locked] = patch;
}
localmaxdepth = max(depth, localmaxdepth);
i += inc; // move on to next body
skip = 1;
}
}
__syncthreads(); // throttle
}
// record maximum tree depth
atomicMax((int *)&maxdepthd, localmaxdepth);
}
/******************************************************************************/
/*** compute center of mass ***************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS3, FACTOR3)
void SummarizationKernel()
{
register int i, j, k, ch, inc, missing, cnt, bottom;
register float m, cm, px, py, pz;
__shared__ volatile int child[THREADS3 * 8];
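// a child is "ready" once its mass is no longer the -1.0f placeholder; children that
// are not yet ready are cached in shared memory and polled until the cell can be summarized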
bottom = bottomd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
missing = 0;
// iterate over all cells assigned to thread
while (k <= nnodesd) {
if (missing == 0) {
// new cell, so initialize
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
cnt = 0;
j = 0;
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
if (ch >= 0) {
if (i != j) {
// move children to front (needed later for speed)
childd[k*8+i] = -1;
childd[k*8+j] = ch;
}
child[missing*THREADS3+threadIdx.x] = ch; // cache missing children
m = massd[ch];
missing++;
if (m >= 0.0f) {
// child is ready
missing--;
if (ch >= nbodiesd) { // count bodies (needed later)
cnt += countd[ch] - 1;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
j++;
}
}
cnt += j;
}
if (missing != 0) {
do {
// poll missing child
ch = child[(missing-1)*THREADS3+threadIdx.x];
m = massd[ch];
if (m >= 0.0f) {
// child is now ready
missing--;
if (ch >= nbodiesd) {
// count bodies (needed later)
cnt += countd[ch] - 1;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
// repeat until we are done or child is not ready
} while ((m >= 0.0f) && (missing != 0));
}
if (missing == 0) {
// all children are ready, so store computed information
countd[k] = cnt;
m = 1.0f / cm;
posxd[k] = px * m;
posyd[k] = py * m;
poszd[k] = pz * m;
__threadfence(); // make sure data are visible before setting mass
massd[k] = cm;
k += inc; // move on to next cell
}
}
}
/******************************************************************************/
/*** sort bodies **************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS4, FACTOR4)
void SortKernel()
{
register int i, k, ch, dec, start, bottom;
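// top-down pass: each cell hands its start offset to its children, so sortd ends up
// listing bodies grouped by subtree, which gives the force kernel better memory locality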
bottom = bottomd;
dec = blockDim.x * gridDim.x;
k = nnodesd + 1 - dec + threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all cells assigned to thread
while (k >= bottom) {
start = startd[k];
if (start >= 0) {
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
if (ch >= nbodiesd) {
// child is a cell
startd[ch] = start; // set start ID of child
start += countd[ch]; // add #bodies in subtree
} else if (ch >= 0) {
// child is a body
sortd[start] = ch; // record body in 'sorted' array
start++;
}
}
k -= dec; // move on to next cell
}
__syncthreads(); // throttle
}
}
/******************************************************************************/
/*** compute force ************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS5, FACTOR5)
void ForceCalculationKernel()
{
register int i, j, k, n, depth, base, sbase, diff;
register float px, py, pz, ax, ay, az, dx, dy, dz, tmp;
__shared__ volatile int pos[MAXDEPTH * THREADS5/WARPSIZE], node[MAXDEPTH * THREADS5/WARPSIZE];
__shared__ volatile float dq[MAXDEPTH * THREADS5/WARPSIZE];
__shared__ volatile int step, maxdepth;
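// each warp shares one traversal stack (node/pos); only the warp's first lane writes it,
// so all lanes of a warp descend the tree in lockstep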
if (0 == threadIdx.x) {
step = stepd;
maxdepth = maxdepthd;
tmp = radiusd;
// precompute values that depend only on tree level
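// dq[d] is the squared opening-criterion distance for tree level d (itolsqd = 1/theta^2);
// it shrinks by a factor of 4 per level because the cell width halves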
dq[0] = tmp * tmp * itolsqd;
for (i = 1; i < maxdepth; i++) {
dq[i] = dq[i - 1] * 0.25f;
}
if (maxdepth > MAXDEPTH) {
*errd = maxdepth;
}
}
__syncthreads();
if (maxdepth <= MAXDEPTH) {
// figure out first thread in each warp (lane 0)
base = threadIdx.x / WARPSIZE;
sbase = base * WARPSIZE;
j = base * MAXDEPTH;
diff = threadIdx.x - sbase;
// make multiple copies to avoid index calculations later
if (diff < MAXDEPTH) {
dq[diff+j] = dq[diff];
}
__syncthreads();
// iterate over all bodies assigned to thread
for (k = threadIdx.x + blockIdx.x * blockDim.x; k < nbodiesd; k += blockDim.x * gridDim.x) {
i = sortd[k]; // get permuted/sorted index
// cache position info
px = posxd[i];
py = posyd[i];
pz = poszd[i];
ax = 0.0f;
ay = 0.0f;
az = 0.0f;
// initialize iteration stack, i.e., push root node onto stack
depth = j;
if (sbase == threadIdx.x) {
node[j] = nnodesd;
pos[j] = 0;
}
__threadfence(); // make sure it's visible
while (depth >= j) {
// stack is not empty
while (pos[depth] < 8) {
// node on top of stack has more children to process
n = childd[node[depth]*8+pos[depth]]; // load child pointer
if (sbase == threadIdx.x) {
// I'm the first thread in the warp
pos[depth]++;
}
__threadfence(); // make sure it's visible
if (n >= 0) {
dx = posxd[n] - px;
dy = posyd[n] - py;
dz = poszd[n] - pz;
tmp = dx*dx + (dy*dy + (dz*dz + epssqd)); // compute distance squared (plus softening)
if ((n < nbodiesd) || __all(tmp >= dq[depth])) { // check if all threads agree that cell is far enough away (or is a body)
tmp = rsqrtf(tmp); // compute distance
tmp = massd[n] * tmp * tmp * tmp;
ax += dx * tmp;
ay += dy * tmp;
az += dz * tmp;
} else {
// push cell onto stack
depth++;
if (sbase == threadIdx.x) {
node[depth] = n;
pos[depth] = 0;
}
__threadfence(); // make sure it's visible
}
} else {
depth = max(j, depth - 1); // early out because all remaining children are also null
}
}
depth--; // done with this level
}
if (step > 0) {
// update velocity
velxd[i] += (ax - accxd[i]) * dthfd;
velyd[i] += (ay - accyd[i]) * dthfd;
velzd[i] += (az - acczd[i]) * dthfd;
}
// save computed acceleration
accxd[i] = ax;
accyd[i] = ay;
acczd[i] = az;
}
}
}
/******************************************************************************/
/*** advance bodies ***********************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS6, FACTOR6)
void IntegrationKernel()
{
register int i, inc;
register float dvelx, dvely, dvelz;
register float velhx, velhy, velhz;
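// leapfrog-style update: half-step velocity kick, full-step position drift using the
// half-step velocity, then the second half-step kick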
// iterate over all bodies assigned to thread
inc = blockDim.x * gridDim.x;
for (i = threadIdx.x + blockIdx.x * blockDim.x; i < nbodiesd; i += inc) {
// integrate
dvelx = accxd[i] * dthfd;
dvely = accyd[i] * dthfd;
dvelz = acczd[i] * dthfd;
velhx = velxd[i] + dvelx;
velhy = velyd[i] + dvely;
velhz = velzd[i] + dvelz;
posxd[i] += velhx * dtimed;
posyd[i] += velhy * dtimed;
poszd[i] += velhz * dtimed;
velxd[i] = velhx + dvelx;
velyd[i] = velhy + dvely;
velzd[i] = velhz + dvelz;
}
}
/******************************************************************************/
static void CudaTest(const char *msg)
{
hipError_t e;
hipDeviceSynchronize();
if (hipSuccess != (e = hipGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", hipGetErrorString(e));
exit(-1);
}
}
/******************************************************************************/
// random number generator
#define MULT 1103515245
#define ADD 12345
#define MASK 0x7FFFFFFF
#define TWOTO31 2147483648.0
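// simple linear congruential generator; drnd() returns a double in [0, 1)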
static int A = 1;
static int B = 0;
static int randx = 1;
static int lastrand;
static void drndset(int seed)
{
A = 1;
B = 0;
randx = (A * seed + B) & MASK;
A = (MULT * A) & MASK;
B = (MULT * B + ADD) & MASK;
}
static double drnd()
{
lastrand = randx;
randx = (A * randx + B) & MASK;
return (double)lastrand / TWOTO31;
}
/******************************************************************************/
int main(int argc, char *argv[])
{
register int i, run, blocks;
register int nnodes, nbodies, step, timesteps, wb_period;
register int runtime, mintime;
int error;
register float dtime, dthf, epssq, itolsq;
float time, timing[8];
clock_t starttime, endtime;
hipEvent_t start, stop;
float *mass, *posx, *posy, *posz, *velx, *vely, *velz;
int *errl, *sortl, *childl, *countl, *startl;
float *massl;
float *posxl, *posyl, *poszl;
float *velxl, *velyl, *velzl;
float *accxl, *accyl, *acczl;
float *maxxl, *maxyl, *maxzl;
float *minxl, *minyl, *minzl;
// perform some checks
fprintf(stderr, "CUDA BarnesHut v2.1\n");
if (argc != 5) {
fprintf(stderr, "\n");
fprintf(stderr, "arguments: number_of_bodies number_of_timesteps input_file writeback_period\n");
exit(-1);
}
int deviceCount;
hipGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "There is no device supporting CUDA\n");
exit(-1);
}
hipDeviceProp_t deviceProp;
hipGetDeviceProperties(&deviceProp, 0);
if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) {
fprintf(stderr, "There is no CUDA capable device\n");
exit(-1);
}
if (deviceProp.major < 2) {
fprintf(stderr, "Need at least compute capability 2.0\n");
exit(-1);
}
if (deviceProp.warpSize != WARPSIZE) {
fprintf(stderr, "Warp size must be %d\n", deviceProp.warpSize);
exit(-1);
}
blocks = deviceProp.multiProcessorCount;
fprintf(stderr, "blocks = %d\n", blocks);
if ((WARPSIZE <= 0) || ((WARPSIZE & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "Warp size must be greater than zero and a power of two\n");
exit(-1);
}
if (MAXDEPTH > WARPSIZE) {
fprintf(stderr, "MAXDEPTH must be less than or equal to WARPSIZE\n");
exit(-1);
}
if ((THREADS1 <= 0) || ((THREADS1 & (THREADS1-1)) != 0)) {
fprintf(stderr, "THREADS1 must be greater than zero and a power of two\n");
exit(-1);
}
// set L1/shared memory configuration
hipFuncSetCacheConfig(BoundingBoxKernel, hipFuncCachePreferShared);
hipFuncSetCacheConfig(TreeBuildingKernel, hipFuncCachePreferL1);
hipFuncSetCacheConfig(SummarizationKernel, hipFuncCachePreferShared);
hipFuncSetCacheConfig(SortKernel, hipFuncCachePreferL1);
hipFuncSetCacheConfig(ForceCalculationKernel, hipFuncCachePreferL1);
hipFuncSetCacheConfig(IntegrationKernel, hipFuncCachePreferL1);
hipGetLastError(); // reset error value
for (run = 0; run < 3; run++) {
for (i = 0; i < 8; i++) timing[i] = 0.0f;
nbodies = atoi(argv[1]);
if (nbodies < 1) {
fprintf(stderr, "nbodies is too small: %d\n", nbodies);
exit(-1);
}
if (nbodies > (1 << 30)) {
fprintf(stderr, "nbodies is too large: %d\n", nbodies);
exit(-1);
}
nnodes = nbodies * 2;
if (nnodes < 1024*blocks) nnodes = 1024*blocks;
while ((nnodes & (WARPSIZE-1)) != 0) nnodes++;
nnodes--;
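// nnodes is now the highest valid array index (one below a multiple of WARPSIZE);
// the root cell lives at index nnodes and new cells are allocated downward from it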
timesteps = atoi(argv[2]);
dtime = 0.025f; dthf = dtime * 0.5f;
epssq = 0.05f * 0.05f;
itolsq = 1.0f / (0.5f * 0.5f);
wb_period = atoi(argv[4]);
if (wb_period < 1) {
fprintf(stderr, "wb_period is too small: %d\n", wb_period);
exit(-1);
}
// allocate memory
if (run == 0) {
fprintf(stderr, "nodes = %d\n", nnodes+1);
fprintf(stderr, "configuration: %d bodies, %d time steps\n", nbodies, timesteps);
mass = (float *)malloc(sizeof(float) * nbodies);
if (mass == NULL) {fprintf(stderr, "cannot allocate mass\n"); exit(-1);}
posx = (float *)malloc(sizeof(float) * nbodies);
if (posx == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);}
posy = (float *)malloc(sizeof(float) * nbodies);
if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);}
posz = (float *)malloc(sizeof(float) * nbodies);
if (posz == NULL) {fprintf(stderr, "cannot allocate posz\n"); exit(-1);}
velx = (float *)malloc(sizeof(float) * nbodies);
if (velx == NULL) {fprintf(stderr, "cannot allocate velx\n"); exit(-1);}
vely = (float *)malloc(sizeof(float) * nbodies);
if (vely == NULL) {fprintf(stderr, "cannot allocate vely\n"); exit(-1);}
velz = (float *)malloc(sizeof(float) * nbodies);
if (velz == NULL) {fprintf(stderr, "cannot allocate velz\n"); exit(-1);}
if (hipSuccess != hipMalloc((void **)&errl, sizeof(int))) fprintf(stderr, "could not allocate errd\n"); CudaTest("couldn't allocate errd");
if (hipSuccess != hipMalloc((void **)&childl, sizeof(int) * (nnodes+1) * 8)) fprintf(stderr, "could not allocate childd\n"); CudaTest("couldn't allocate childd");
if (hipSuccess != hipMalloc((void **)&massl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate massd\n"); CudaTest("couldn't allocate massd");
if (hipSuccess != hipMalloc((void **)&posxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posxd\n"); CudaTest("couldn't allocate posxd");
if (hipSuccess != hipMalloc((void **)&posyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posyd\n"); CudaTest("couldn't allocate posyd");
if (hipSuccess != hipMalloc((void **)&poszl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate poszd\n"); CudaTest("couldn't allocate poszd");
if (hipSuccess != hipMalloc((void **)&countl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate countd\n"); CudaTest("couldn't allocate countd");
if (hipSuccess != hipMalloc((void **)&startl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate startd\n"); CudaTest("couldn't allocate startd");
// alias arrays
int inc = (nbodies + WARPSIZE - 1) & (-WARPSIZE);
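// inc is nbodies rounded up to a multiple of WARPSIZE, so the seven arrays carved
// out of childl below are warp-aligned and do not overlap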
velxl = (float *)&childl[0*inc];
velyl = (float *)&childl[1*inc];
velzl = (float *)&childl[2*inc];
accxl = (float *)&childl[3*inc];
accyl = (float *)&childl[4*inc];
acczl = (float *)&childl[5*inc];
sortl = (int *)&childl[6*inc];
if (hipSuccess != hipMalloc((void **)&maxxl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxxd\n"); CudaTest("couldn't allocate maxxd");
if (hipSuccess != hipMalloc((void **)&maxyl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxyd\n"); CudaTest("couldn't allocate maxyd");
if (hipSuccess != hipMalloc((void **)&maxzl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxzd\n"); CudaTest("couldn't allocate maxzd");
if (hipSuccess != hipMalloc((void **)&minxl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minxd\n"); CudaTest("couldn't allocate minxd");
if (hipSuccess != hipMalloc((void **)&minyl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minyd\n"); CudaTest("couldn't allocate minyd");
if (hipSuccess != hipMalloc((void **)&minzl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minzd\n"); CudaTest("couldn't allocate minzd");
if (hipSuccess != hipMemcpyToSymbol(nnodesd, &nnodes, sizeof(int))) fprintf(stderr, "copying of nnodes to device failed\n"); CudaTest("nnode copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(nbodiesd, &nbodies, sizeof(int))) fprintf(stderr, "copying of nbodies to device failed\n"); CudaTest("nbody copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(errd, &errl, sizeof(void*))) fprintf(stderr, "copying of err to device failed\n"); CudaTest("err copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(dtimed, &dtime, sizeof(float))) fprintf(stderr, "copying of dtime to device failed\n"); CudaTest("dtime copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(dthfd, &dthf, sizeof(float))) fprintf(stderr, "copying of dthf to device failed\n"); CudaTest("dthf copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(epssqd, &epssq, sizeof(float))) fprintf(stderr, "copying of epssq to device failed\n"); CudaTest("epssq copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(itolsqd, &itolsq, sizeof(float))) fprintf(stderr, "copying of itolsq to device failed\n"); CudaTest("itolsq copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(sortd, &sortl, sizeof(void*))) fprintf(stderr, "copying of sortl to device failed\n"); CudaTest("sortl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(countd, &countl, sizeof(void*))) fprintf(stderr, "copying of countl to device failed\n"); CudaTest("countl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(startd, &startl, sizeof(void*))) fprintf(stderr, "copying of startl to device failed\n"); CudaTest("startl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(childd, &childl, sizeof(void*))) fprintf(stderr, "copying of childl to device failed\n"); CudaTest("childl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(massd, &massl, sizeof(void*))) fprintf(stderr, "copying of massl to device failed\n"); CudaTest("massl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(posxd, &posxl, sizeof(void*))) fprintf(stderr, "copying of posxl to device failed\n"); CudaTest("posxl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(posyd, &posyl, sizeof(void*))) fprintf(stderr, "copying of posyl to device failed\n"); CudaTest("posyl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(poszd, &poszl, sizeof(void*))) fprintf(stderr, "copying of poszl to device failed\n"); CudaTest("poszl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(velxd, &velxl, sizeof(void*))) fprintf(stderr, "copying of velxl to device failed\n"); CudaTest("velxl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(velyd, &velyl, sizeof(void*))) fprintf(stderr, "copying of velyl to device failed\n"); CudaTest("velyl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(velzd, &velzl, sizeof(void*))) fprintf(stderr, "copying of velzl to device failed\n"); CudaTest("velzl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(accxd, &accxl, sizeof(void*))) fprintf(stderr, "copying of accxl to device failed\n"); CudaTest("accxl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(accyd, &accyl, sizeof(void*))) fprintf(stderr, "copying of accyl to device failed\n"); CudaTest("accyl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(acczd, &acczl, sizeof(void*))) fprintf(stderr, "copying of acczl to device failed\n"); CudaTest("acczl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(maxxd, &maxxl, sizeof(void*))) fprintf(stderr, "copying of maxxl to device failed\n"); CudaTest("maxxl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(maxyd, &maxyl, sizeof(void*))) fprintf(stderr, "copying of maxyl to device failed\n"); CudaTest("maxyl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(maxzd, &maxzl, sizeof(void*))) fprintf(stderr, "copying of maxzl to device failed\n"); CudaTest("maxzl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(minxd, &minxl, sizeof(void*))) fprintf(stderr, "copying of minxl to device failed\n"); CudaTest("minxl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(minyd, &minyl, sizeof(void*))) fprintf(stderr, "copying of minyl to device failed\n"); CudaTest("minyl copy to device failed");
if (hipSuccess != hipMemcpyToSymbol(minzd, &minzl, sizeof(void*))) fprintf(stderr, "copying of minzl to device failed\n"); CudaTest("minzl copy to device failed");
}
// Open system file
char *filePath = "system16384.dat";
FILE *fh = fopen(argv[3], "r");
if (!fh) {
printf("E: Unable to open %s (read-only)\n", argv[3]);
exit(EXIT_FAILURE);
}
// Load particles from file
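// expected line format: posx posy posz velx vely velz mass (whitespace separated)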
char line[256];
int tmp;
i = 0;
do {
// Read line
line[0] = '\0';
fgets(line, 256, fh);
// Grab body
tmp = sscanf(line, " %f %f %f %f %f %f %f ", &posx[i], &posy[i], &posz[i],
&velx[i], &vely[i], &velz[i], &mass[i]);
if (tmp != 7)
continue;
i++;
if (i >= nbodies) {
break;
}
} while (!feof(fh));
// generate input
#if 0
drndset(7);
rsc = (3 * 3.1415926535897932384626433832795) / 16;
vsc = sqrt(1.0 / rsc);
for (i = 0; i < nbodies; i++) {
mass[i] = 1.0 / nbodies;
r = 1.0 / sqrt(pow(drnd()*0.999, -2.0/3.0) - 1);
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = rsc * r / sqrt(sq);
posx[i] = x * scale;
posy[i] = y * scale;
posz[i] = z * scale;
do {
x = drnd();
y = drnd() * 0.1;
} while (y > x*x * pow(1 - x*x, 3.5));
v = x * sqrt(2.0 / sqrt(1 + r*r));
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = vsc * v / sqrt(sq);
velx[i] = x * scale;
vely[i] = y * scale;
velz[i] = z * scale;
}
#endif
if (hipSuccess != hipMemcpy(massl, mass, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of mass to device failed\n"); CudaTest("mass copy to device failed");
if (hipSuccess != hipMemcpy(posxl, posx, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posx to device failed\n"); CudaTest("posx copy to device failed");
if (hipSuccess != hipMemcpy(posyl, posy, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posy to device failed\n"); CudaTest("posy copy to device failed");
if (hipSuccess != hipMemcpy(poszl, posz, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of posz to device failed\n"); CudaTest("posz copy to device failed");
if (hipSuccess != hipMemcpy(velxl, velx, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of velx to device failed\n"); CudaTest("velx copy to device failed");
if (hipSuccess != hipMemcpy(velyl, vely, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of vely to device failed\n"); CudaTest("vely copy to device failed");
if (hipSuccess != hipMemcpy(velzl, velz, sizeof(float) * nbodies, hipMemcpyHostToDevice)) fprintf(stderr, "copying of velz to device failed\n"); CudaTest("velz copy to device failed");
// run timesteps (launch GPU kernels)
hipEventCreate(&start); hipEventCreate(&stop);
starttime = clock();
hipEventRecord(start, 0);
hipLaunchKernelGGL(( InitializationKernel), dim3(1), dim3(1), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[0] += time;
CudaTest("kernel 0 launch failed");
for (step = 0; step < timesteps; step++) {
hipEventRecord(start, 0);
hipLaunchKernelGGL(( BoundingBoxKernel), dim3(blocks * FACTOR1), dim3(THREADS1), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[1] += time;
CudaTest("kernel 1 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( TreeBuildingKernel), dim3(blocks * FACTOR2), dim3(THREADS2), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[2] += time;
CudaTest("kernel 2 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( SummarizationKernel), dim3(blocks * FACTOR3), dim3(THREADS3), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[3] += time;
CudaTest("kernel 3 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( SortKernel), dim3(blocks * FACTOR4), dim3(THREADS4), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[4] += time;
CudaTest("kernel 4 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( ForceCalculationKernel), dim3(blocks * FACTOR5), dim3(THREADS5), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[5] += time;
CudaTest("kernel 5 launch failed");
hipEventRecord(start, 0);
hipLaunchKernelGGL(( IntegrationKernel), dim3(blocks * FACTOR6), dim3(THREADS6), 0, 0, );
hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&time, start, stop);
timing[6] += time;
CudaTest("kernel 6 launch failed");
hipEventRecord(start, 0);
if ((step % wb_period) == 0) {
// transfer result back to CPU
//fprintf(stderr, "Copying result back to CPU!\n");
if (hipSuccess != hipMemcpy(&error, errl, sizeof(int), hipMemcpyDeviceToHost)) fprintf(stderr, "copying of err from device failed\n"); CudaTest("err copy from device failed");
if (hipSuccess != hipMemcpy(posx, posxl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posx from device failed\n"); CudaTest("posx copy from device failed");
if (hipSuccess != hipMemcpy(posy, posyl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posy from device failed\n"); CudaTest("posy copy from device failed");
if (hipSuccess != hipMemcpy(posz, poszl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of posz from device failed\n"); CudaTest("posz copy from device failed");
if (hipSuccess != hipMemcpy(velx, velxl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of velx from device failed\n"); CudaTest("velx copy from device failed");
if (hipSuccess != hipMemcpy(vely, velyl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of vely from device failed\n"); CudaTest("vely copy from device failed");
if (hipSuccess != hipMemcpy(velz, velzl, sizeof(float) * nbodies, hipMemcpyDeviceToHost)) fprintf(stderr, "copying of velz from device failed\n"); CudaTest("velz copy from device failed");
}
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
timing[7] += time;
}
endtime = clock();
CudaTest("kernel launch failed");
hipEventDestroy(start); hipEventDestroy(stop);
runtime = (int) (1000.0f * (endtime - starttime) / CLOCKS_PER_SEC);
fprintf(stderr, "runtime: %d ms (", runtime);
time = 0;
for (i = 1; i < 8; i++) {
fprintf(stderr, " %.1f ", timing[i]);
time += timing[i];
}
if (error == 0) {
fprintf(stderr, ") = %.1f\n", time);
} else {
fprintf(stderr, ") = %.1f FAILED %d\n", time, error);
}
if ((run == 0) || (mintime > runtime)) mintime = runtime;
}
fprintf(stderr, "mintime: %d ms\n", mintime);
// print output
// for (i = 0; i < nbodies; i++) {
printf("%.2e %.2e %.2e\n", posx[i], posy[i], posz[i]);
// }
free(mass);
free(posx);
free(posy);
free(posz);
free(velx);
free(vely);
free(velz);
hipFree(errl);
hipFree(childl);
hipFree(massl);
hipFree(posxl);
hipFree(posyl);
hipFree(poszl);
hipFree(countl);
hipFree(startl);
hipFree(maxxl);
hipFree(maxyl);
hipFree(maxzl);
hipFree(minxl);
hipFree(minyl);
hipFree(minzl);
return 0;
}
| bd7f86cbb932738271eb597f6878ce3dc4344ec5.cu | /*
CUDA BarnesHut v2.1: Simulation of the gravitational forces
in a galactic cluster using the Barnes-Hut n-body algorithm
Copyright (c) 2011, Texas State University-San Marcos. All rights reserved.
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice,
this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice,
this list of conditions and the following disclaimer in the documentation
and/or other materials provided with the distribution.
* Neither the name of Texas State University-San Marcos nor the names of its
contributors may be used to endorse or promote products derived from this
software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
OF THE POSSIBILITY OF SUCH DAMAGE.
Author: Martin Burtscher
*/
#include <stdlib.h>
#include <stdio.h>
#include <math.h>
#include <time.h>
#include <cuda.h>
// thread count
#define THREADS1 512 /* must be a power of 2 */
#define THREADS2 1024
#define THREADS3 1024
#define THREADS4 256
#define THREADS5 256
#define THREADS6 512
// block count = factor * #SMs
#define FACTOR1 3
#define FACTOR2 1
#define FACTOR3 1 /* must all be resident at the same time */
#define FACTOR4 1 /* must all be resident at the same time */
#define FACTOR5 5
#define FACTOR6 3
#define WARPSIZE 32
#define MAXDEPTH 32
/******************************************************************************/
// childd is aliased with velxd, velyd, velzd, accxd, accyd, acczd, and sortd but they never use the same memory locations
__constant__ int nnodesd, nbodiesd;
__constant__ float dtimed, dthfd, epssqd, itolsqd;
__constant__ volatile float *massd, *posxd, *posyd, *poszd, *velxd, *velyd, *velzd, *accxd, *accyd, *acczd;
__constant__ volatile float *maxxd, *maxyd, *maxzd, *minxd, *minyd, *minzd;
__constant__ volatile int *errd, *sortd, *childd, *countd, *startd;
__device__ volatile int stepd, bottomd, maxdepthd, blkcntd;
__device__ volatile float radiusd;
/******************************************************************************/
/*** initialize memory ********************************************************/
/******************************************************************************/
__global__ void InitializationKernel()
{
*errd = 0;
stepd = -1;
maxdepthd = 1;
blkcntd = 0;
}
/******************************************************************************/
/*** compute center and radius ************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS1, FACTOR1)
void BoundingBoxKernel()
{
register int i, j, k, inc;
register float val, minx, maxx, miny, maxy, minz, maxz;
__shared__ volatile float sminx[THREADS1], smaxx[THREADS1], sminy[THREADS1], smaxy[THREADS1], sminz[THREADS1], smaxz[THREADS1];
// initialize with valid data (in case #bodies < #threads)
minx = maxx = posxd[0];
miny = maxy = posyd[0];
minz = maxz = poszd[0];
// scan all bodies
i = threadIdx.x;
inc = THREADS1 * gridDim.x;
for (j = i + blockIdx.x * THREADS1; j < nbodiesd; j += inc) {
val = posxd[j];
minx = min(minx, val);
maxx = max(maxx, val);
val = posyd[j];
miny = min(miny, val);
maxy = max(maxy, val);
val = poszd[j];
minz = min(minz, val);
maxz = max(maxz, val);
}
// reduction in shared memory
sminx[i] = minx;
smaxx[i] = maxx;
sminy[i] = miny;
smaxy[i] = maxy;
sminz[i] = minz;
smaxz[i] = maxz;
for (j = THREADS1 / 2; j > 0; j /= 2) {
__syncthreads();
if (i < j) {
k = i + j;
sminx[i] = minx = min(minx, sminx[k]);
smaxx[i] = maxx = max(maxx, smaxx[k]);
sminy[i] = miny = min(miny, sminy[k]);
smaxy[i] = maxy = max(maxy, smaxy[k]);
sminz[i] = minz = min(minz, sminz[k]);
smaxz[i] = maxz = max(maxz, smaxz[k]);
}
}
// write block result to global memory
if (i == 0) {
k = blockIdx.x;
minxd[k] = minx;
maxxd[k] = maxx;
minyd[k] = miny;
maxyd[k] = maxy;
minzd[k] = minz;
maxzd[k] = maxz;
inc = gridDim.x - 1;
if (inc == atomicInc((unsigned int *)&blkcntd, inc)) {
// I'm the last block, so combine all block results
for (j = 0; j <= inc; j++) {
minx = min(minx, minxd[j]);
maxx = max(maxx, maxxd[j]);
miny = min(miny, minyd[j]);
maxy = max(maxy, maxyd[j]);
minz = min(minz, minzd[j]);
maxz = max(maxz, maxzd[j]);
}
// compute 'radius'
val = max(maxx - minx, maxy - miny);
radiusd = max(val, maxz - minz) * 0.5f;
// create root node
k = nnodesd;
bottomd = k;
massd[k] = -1.0f;
startd[k] = 0;
posxd[k] = (minx + maxx) * 0.5f;
posyd[k] = (miny + maxy) * 0.5f;
poszd[k] = (minz + maxz) * 0.5f;
k *= 8;
for (i = 0; i < 8; i++) childd[k + i] = -1;
stepd++;
}
}
}
/******************************************************************************/
/*** build tree ***************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS2, FACTOR2)
void TreeBuildingKernel()
{
register int i, j, k, depth, localmaxdepth, skip, inc;
register float x, y, z, r;
register float px, py, pz;
register int ch, n, cell, locked, patch;
register float radius, rootx, rooty, rootz;
// cache root data
radius = radiusd;
rootx = posxd[nnodesd];
rooty = posyd[nnodesd];
rootz = poszd[nnodesd];
localmaxdepth = 1;
skip = 1;
inc = blockDim.x * gridDim.x;
i = threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all bodies assigned to thread
while (i < nbodiesd) {
if (skip != 0) {
// new body, so start traversing at root
skip = 0;
px = posxd[i];
py = posyd[i];
pz = poszd[i];
n = nnodesd;
depth = 1;
r = radius;
j = 0;
// determine which child to follow
if (rootx < px) j = 1;
if (rooty < py) j += 2;
if (rootz < pz) j += 4;
}
// follow path to leaf cell
ch = childd[n*8+j];
while (ch >= nbodiesd) {
n = ch;
depth++;
r *= 0.5f;
j = 0;
// determine which child to follow
if (posxd[n] < px) j = 1;
if (posyd[n] < py) j += 2;
if (poszd[n] < pz) j += 4;
ch = childd[n*8+j];
}
if (ch != -2) { // skip if child pointer is locked and try again later
locked = n*8+j;
if (ch == atomicCAS((int *)&childd[locked], ch, -2)) { // try to lock
if (ch == -1) {
// if null, just insert the new body
childd[locked] = i;
} else { // there already is a body in this position
patch = -1;
// create new cell(s) and insert the old and new body
do {
depth++;
cell = atomicSub((int *)&bottomd, 1) - 1;
if (cell <= nbodiesd) {
*errd = 1;
bottomd = nnodesd;
}
patch = max(patch, cell);
x = (j & 1) * r;
y = ((j >> 1) & 1) * r;
z = ((j >> 2) & 1) * r;
r *= 0.5f;
massd[cell] = -1.0f;
startd[cell] = -1;
x = posxd[cell] = posxd[n] - r + x;
y = posyd[cell] = posyd[n] - r + y;
z = poszd[cell] = poszd[n] - r + z;
for (k = 0; k < 8; k++) childd[cell*8+k] = -1;
if (patch != cell) {
childd[n*8+j] = cell;
}
j = 0;
if (x < posxd[ch]) j = 1;
if (y < posyd[ch]) j += 2;
if (z < poszd[ch]) j += 4;
childd[cell*8+j] = ch;
n = cell;
j = 0;
if (x < px) j = 1;
if (y < py) j += 2;
if (z < pz) j += 4;
ch = childd[n*8+j];
// repeat until the two bodies are different children
} while (ch >= 0);
childd[n*8+j] = i;
__threadfence(); // push out subtree
childd[locked] = patch;
}
localmaxdepth = max(depth, localmaxdepth);
i += inc; // move on to next body
skip = 1;
}
}
__syncthreads(); // throttle
}
// record maximum tree depth
atomicMax((int *)&maxdepthd, localmaxdepth);
}
/******************************************************************************/
/*** compute center of mass ***************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS3, FACTOR3)
void SummarizationKernel()
{
register int i, j, k, ch, inc, missing, cnt, bottom;
register float m, cm, px, py, pz;
__shared__ volatile int child[THREADS3 * 8];
bottom = bottomd;
inc = blockDim.x * gridDim.x;
k = (bottom & (-WARPSIZE)) + threadIdx.x + blockIdx.x * blockDim.x; // align to warp size
if (k < bottom) k += inc;
missing = 0;
// iterate over all cells assigned to thread
while (k <= nnodesd) {
if (missing == 0) {
// new cell, so initialize
cm = 0.0f;
px = 0.0f;
py = 0.0f;
pz = 0.0f;
cnt = 0;
j = 0;
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
if (ch >= 0) {
if (i != j) {
// move children to front (needed later for speed)
childd[k*8+i] = -1;
childd[k*8+j] = ch;
}
child[missing*THREADS3+threadIdx.x] = ch; // cache missing children
m = massd[ch];
missing++;
if (m >= 0.0f) {
// child is ready
missing--;
if (ch >= nbodiesd) { // count bodies (needed later)
cnt += countd[ch] - 1;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
j++;
}
}
cnt += j;
}
if (missing != 0) {
do {
// poll missing child
ch = child[(missing-1)*THREADS3+threadIdx.x];
m = massd[ch];
if (m >= 0.0f) {
// child is now ready
missing--;
if (ch >= nbodiesd) {
// count bodies (needed later)
cnt += countd[ch] - 1;
}
// add child's contribution
cm += m;
px += posxd[ch] * m;
py += posyd[ch] * m;
pz += poszd[ch] * m;
}
// repeat until we are done or child is not ready
} while ((m >= 0.0f) && (missing != 0));
}
if (missing == 0) {
// all children are ready, so store computed information
countd[k] = cnt;
m = 1.0f / cm;
posxd[k] = px * m;
posyd[k] = py * m;
poszd[k] = pz * m;
__threadfence(); // make sure data are visible before setting mass
massd[k] = cm;
k += inc; // move on to next cell
}
}
}
/******************************************************************************/
/*** sort bodies **************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS4, FACTOR4)
void SortKernel()
{
register int i, k, ch, dec, start, bottom;
bottom = bottomd;
dec = blockDim.x * gridDim.x;
k = nnodesd + 1 - dec + threadIdx.x + blockIdx.x * blockDim.x;
// iterate over all cells assigned to thread
while (k >= bottom) {
start = startd[k];
if (start >= 0) {
for (i = 0; i < 8; i++) {
ch = childd[k*8+i];
if (ch >= nbodiesd) {
// child is a cell
startd[ch] = start; // set start ID of child
start += countd[ch]; // add #bodies in subtree
} else if (ch >= 0) {
// child is a body
sortd[start] = ch; // record body in 'sorted' array
start++;
}
}
k -= dec; // move on to next cell
}
__syncthreads(); // throttle
}
}
/******************************************************************************/
/*** compute force ************************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS5, FACTOR5)
void ForceCalculationKernel()
{
register int i, j, k, n, depth, base, sbase, diff;
register float px, py, pz, ax, ay, az, dx, dy, dz, tmp;
__shared__ volatile int pos[MAXDEPTH * THREADS5/WARPSIZE], node[MAXDEPTH * THREADS5/WARPSIZE];
__shared__ volatile float dq[MAXDEPTH * THREADS5/WARPSIZE];
__shared__ volatile int step, maxdepth;
if (0 == threadIdx.x) {
step = stepd;
maxdepth = maxdepthd;
tmp = radiusd;
// precompute values that depend only on tree level
dq[0] = tmp * tmp * itolsqd;
for (i = 1; i < maxdepth; i++) {
dq[i] = dq[i - 1] * 0.25f;
}
if (maxdepth > MAXDEPTH) {
*errd = maxdepth;
}
}
__syncthreads();
if (maxdepth <= MAXDEPTH) {
// figure out first thread in each warp (lane 0)
base = threadIdx.x / WARPSIZE;
sbase = base * WARPSIZE;
j = base * MAXDEPTH;
diff = threadIdx.x - sbase;
// make multiple copies to avoid index calculations later
if (diff < MAXDEPTH) {
dq[diff+j] = dq[diff];
}
__syncthreads();
// iterate over all bodies assigned to thread
for (k = threadIdx.x + blockIdx.x * blockDim.x; k < nbodiesd; k += blockDim.x * gridDim.x) {
i = sortd[k]; // get permuted/sorted index
// cache position info
px = posxd[i];
py = posyd[i];
pz = poszd[i];
ax = 0.0f;
ay = 0.0f;
az = 0.0f;
// initialize iteration stack, i.e., push root node onto stack
depth = j;
if (sbase == threadIdx.x) {
node[j] = nnodesd;
pos[j] = 0;
}
__threadfence(); // make sure it's visible
while (depth >= j) {
// stack is not empty
while (pos[depth] < 8) {
// node on top of stack has more children to process
n = childd[node[depth]*8+pos[depth]]; // load child pointer
if (sbase == threadIdx.x) {
// I'm the first thread in the warp
pos[depth]++;
}
__threadfence(); // make sure it's visible
if (n >= 0) {
dx = posxd[n] - px;
dy = posyd[n] - py;
dz = poszd[n] - pz;
tmp = dx*dx + (dy*dy + (dz*dz + epssqd)); // compute distance squared (plus softening)
if ((n < nbodiesd) || __all(tmp >= dq[depth])) { // check if all threads agree that cell is far enough away (or is a body)
tmp = rsqrtf(tmp); // compute distance
tmp = massd[n] * tmp * tmp * tmp;
ax += dx * tmp;
ay += dy * tmp;
az += dz * tmp;
} else {
// push cell onto stack
depth++;
if (sbase == threadIdx.x) {
node[depth] = n;
pos[depth] = 0;
}
__threadfence(); // make sure it's visible
}
} else {
depth = max(j, depth - 1); // early out because all remaining children are also null
}
}
depth--; // done with this level
}
if (step > 0) {
// update velocity
velxd[i] += (ax - accxd[i]) * dthfd;
velyd[i] += (ay - accyd[i]) * dthfd;
velzd[i] += (az - acczd[i]) * dthfd;
}
// save computed acceleration
accxd[i] = ax;
accyd[i] = ay;
acczd[i] = az;
}
}
}
/******************************************************************************/
/*** advance bodies ***********************************************************/
/******************************************************************************/
__global__
__launch_bounds__(THREADS6, FACTOR6)
void IntegrationKernel()
{
register int i, inc;
register float dvelx, dvely, dvelz;
register float velhx, velhy, velhz;
// iterate over all bodies assigned to thread
inc = blockDim.x * gridDim.x;
for (i = threadIdx.x + blockIdx.x * blockDim.x; i < nbodiesd; i += inc) {
// integrate
dvelx = accxd[i] * dthfd;
dvely = accyd[i] * dthfd;
dvelz = acczd[i] * dthfd;
velhx = velxd[i] + dvelx;
velhy = velyd[i] + dvely;
velhz = velzd[i] + dvelz;
posxd[i] += velhx * dtimed;
posyd[i] += velhy * dtimed;
poszd[i] += velhz * dtimed;
velxd[i] = velhx + dvelx;
velyd[i] = velhy + dvely;
velzd[i] = velhz + dvelz;
}
}
/******************************************************************************/
static void CudaTest(const char *msg)
{
cudaError_t e;
cudaThreadSynchronize();
if (cudaSuccess != (e = cudaGetLastError())) {
fprintf(stderr, "%s: %d\n", msg, e);
fprintf(stderr, "%s\n", cudaGetErrorString(e));
exit(-1);
}
}
/******************************************************************************/
// random number generator
#define MULT 1103515245
#define ADD 12345
#define MASK 0x7FFFFFFF
#define TWOTO31 2147483648.0
static int A = 1;
static int B = 0;
static int randx = 1;
static int lastrand;
static void drndset(int seed)
{
A = 1;
B = 0;
randx = (A * seed + B) & MASK;
A = (MULT * A) & MASK;
B = (MULT * B + ADD) & MASK;
}
static double drnd()
{
lastrand = randx;
randx = (A * randx + B) & MASK;
return (double)lastrand / TWOTO31;
}
/******************************************************************************/
int main(int argc, char *argv[])
{
register int i, run, blocks;
register int nnodes, nbodies, step, timesteps, wb_period;
register int runtime, mintime;
int error;
register float dtime, dthf, epssq, itolsq;
float time, timing[8];
clock_t starttime, endtime;
cudaEvent_t start, stop;
float *mass, *posx, *posy, *posz, *velx, *vely, *velz;
int *errl, *sortl, *childl, *countl, *startl;
float *massl;
float *posxl, *posyl, *poszl;
float *velxl, *velyl, *velzl;
float *accxl, *accyl, *acczl;
float *maxxl, *maxyl, *maxzl;
float *minxl, *minyl, *minzl;
// perform some checks
fprintf(stderr, "CUDA BarnesHut v2.1\n");
if (argc != 5) {
fprintf(stderr, "\n");
fprintf(stderr, "arguments: number_of_bodies number_of_timesteps input_file writeback_period\n");
exit(-1);
}
int deviceCount;
cudaGetDeviceCount(&deviceCount);
if (deviceCount == 0) {
fprintf(stderr, "There is no device supporting CUDA\n");
exit(-1);
}
cudaDeviceProp deviceProp;
cudaGetDeviceProperties(&deviceProp, 0);
if ((deviceProp.major == 9999) && (deviceProp.minor == 9999)) {
fprintf(stderr, "There is no CUDA capable device\n");
exit(-1);
}
if (deviceProp.major < 2) {
fprintf(stderr, "Need at least compute capability 2.0\n");
exit(-1);
}
if (deviceProp.warpSize != WARPSIZE) {
fprintf(stderr, "Warp size must be %d\n", deviceProp.warpSize);
exit(-1);
}
blocks = deviceProp.multiProcessorCount;
fprintf(stderr, "blocks = %d\n", blocks);
if ((WARPSIZE <= 0) || ((WARPSIZE & (WARPSIZE-1)) != 0)) {
fprintf(stderr, "Warp size must be greater than zero and a power of two\n");
exit(-1);
}
if (MAXDEPTH > WARPSIZE) {
fprintf(stderr, "MAXDEPTH must be less than or equal to WARPSIZE\n");
exit(-1);
}
if ((THREADS1 <= 0) || ((THREADS1 & (THREADS1-1)) != 0)) {
fprintf(stderr, "THREADS1 must be greater than zero and a power of two\n");
exit(-1);
}
// set L1/shared memory configuration
cudaFuncSetCacheConfig(BoundingBoxKernel, cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(TreeBuildingKernel, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(SummarizationKernel, cudaFuncCachePreferShared);
cudaFuncSetCacheConfig(SortKernel, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(ForceCalculationKernel, cudaFuncCachePreferL1);
cudaFuncSetCacheConfig(IntegrationKernel, cudaFuncCachePreferL1);
cudaGetLastError(); // reset error value
for (run = 0; run < 3; run++) {
for (i = 0; i < 8; i++) timing[i] = 0.0f;
nbodies = atoi(argv[1]);
if (nbodies < 1) {
fprintf(stderr, "nbodies is too small: %d\n", nbodies);
exit(-1);
}
if (nbodies > (1 << 30)) {
fprintf(stderr, "nbodies is too large: %d\n", nbodies);
exit(-1);
}
nnodes = nbodies * 2;
if (nnodes < 1024*blocks) nnodes = 1024*blocks;
while ((nnodes & (WARPSIZE-1)) != 0) nnodes++;
nnodes--;
timesteps = atoi(argv[2]);
dtime = 0.025f; dthf = dtime * 0.5f;
epssq = 0.05f * 0.05f;
itolsq = 1.0f / (0.5f * 0.5f);
wb_period = atoi(argv[4]);
if (wb_period < 1) {
fprintf(stderr, "wb_period is too small: %d\n", wb_period);
exit(-1);
}
// allocate memory
if (run == 0) {
fprintf(stderr, "nodes = %d\n", nnodes+1);
fprintf(stderr, "configuration: %d bodies, %d time steps\n", nbodies, timesteps);
mass = (float *)malloc(sizeof(float) * nbodies);
if (mass == NULL) {fprintf(stderr, "cannot allocate mass\n"); exit(-1);}
posx = (float *)malloc(sizeof(float) * nbodies);
if (posx == NULL) {fprintf(stderr, "cannot allocate posx\n"); exit(-1);}
posy = (float *)malloc(sizeof(float) * nbodies);
if (posy == NULL) {fprintf(stderr, "cannot allocate posy\n"); exit(-1);}
posz = (float *)malloc(sizeof(float) * nbodies);
if (posz == NULL) {fprintf(stderr, "cannot allocate posz\n"); exit(-1);}
velx = (float *)malloc(sizeof(float) * nbodies);
if (velx == NULL) {fprintf(stderr, "cannot allocate velx\n"); exit(-1);}
vely = (float *)malloc(sizeof(float) * nbodies);
if (vely == NULL) {fprintf(stderr, "cannot allocate vely\n"); exit(-1);}
velz = (float *)malloc(sizeof(float) * nbodies);
if (velz == NULL) {fprintf(stderr, "cannot allocate velz\n"); exit(-1);}
if (cudaSuccess != cudaMalloc((void **)&errl, sizeof(int))) fprintf(stderr, "could not allocate errd\n"); CudaTest("couldn't allocate errd");
if (cudaSuccess != cudaMalloc((void **)&childl, sizeof(int) * (nnodes+1) * 8)) fprintf(stderr, "could not allocate childd\n"); CudaTest("couldn't allocate childd");
if (cudaSuccess != cudaMalloc((void **)&massl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate massd\n"); CudaTest("couldn't allocate massd");
if (cudaSuccess != cudaMalloc((void **)&posxl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posxd\n"); CudaTest("couldn't allocate posxd");
if (cudaSuccess != cudaMalloc((void **)&posyl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate posyd\n"); CudaTest("couldn't allocate posyd");
if (cudaSuccess != cudaMalloc((void **)&poszl, sizeof(float) * (nnodes+1))) fprintf(stderr, "could not allocate poszd\n"); CudaTest("couldn't allocate poszd");
if (cudaSuccess != cudaMalloc((void **)&countl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate countd\n"); CudaTest("couldn't allocate countd");
if (cudaSuccess != cudaMalloc((void **)&startl, sizeof(int) * (nnodes+1))) fprintf(stderr, "could not allocate startd\n"); CudaTest("couldn't allocate startd");
// alias arrays
int inc = (nbodies + WARPSIZE - 1) & (-WARPSIZE);
velxl = (float *)&childl[0*inc];
velyl = (float *)&childl[1*inc];
velzl = (float *)&childl[2*inc];
accxl = (float *)&childl[3*inc];
accyl = (float *)&childl[4*inc];
acczl = (float *)&childl[5*inc];
sortl = (int *)&childl[6*inc];
if (cudaSuccess != cudaMalloc((void **)&maxxl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxxd\n"); CudaTest("couldn't allocate maxxd");
if (cudaSuccess != cudaMalloc((void **)&maxyl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxyd\n"); CudaTest("couldn't allocate maxyd");
if (cudaSuccess != cudaMalloc((void **)&maxzl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate maxzd\n"); CudaTest("couldn't allocate maxzd");
if (cudaSuccess != cudaMalloc((void **)&minxl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minxd\n"); CudaTest("couldn't allocate minxd");
if (cudaSuccess != cudaMalloc((void **)&minyl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minyd\n"); CudaTest("couldn't allocate minyd");
if (cudaSuccess != cudaMalloc((void **)&minzl, sizeof(float) * blocks)) fprintf(stderr, "could not allocate minzd\n"); CudaTest("couldn't allocate minzd");
if (cudaSuccess != cudaMemcpyToSymbol(nnodesd, &nnodes, sizeof(int))) fprintf(stderr, "copying of nnodes to device failed\n"); CudaTest("nnode copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(nbodiesd, &nbodies, sizeof(int))) fprintf(stderr, "copying of nbodies to device failed\n"); CudaTest("nbody copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(errd, &errl, sizeof(void*))) fprintf(stderr, "copying of err to device failed\n"); CudaTest("err copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(dtimed, &dtime, sizeof(float))) fprintf(stderr, "copying of dtime to device failed\n"); CudaTest("dtime copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(dthfd, &dthf, sizeof(float))) fprintf(stderr, "copying of dthf to device failed\n"); CudaTest("dthf copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(epssqd, &epssq, sizeof(float))) fprintf(stderr, "copying of epssq to device failed\n"); CudaTest("epssq copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(itolsqd, &itolsq, sizeof(float))) fprintf(stderr, "copying of itolsq to device failed\n"); CudaTest("itolsq copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(sortd, &sortl, sizeof(void*))) fprintf(stderr, "copying of sortl to device failed\n"); CudaTest("sortl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(countd, &countl, sizeof(void*))) fprintf(stderr, "copying of countl to device failed\n"); CudaTest("countl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(startd, &startl, sizeof(void*))) fprintf(stderr, "copying of startl to device failed\n"); CudaTest("startl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(childd, &childl, sizeof(void*))) fprintf(stderr, "copying of childl to device failed\n"); CudaTest("childl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(massd, &massl, sizeof(void*))) fprintf(stderr, "copying of massl to device failed\n"); CudaTest("massl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(posxd, &posxl, sizeof(void*))) fprintf(stderr, "copying of posxl to device failed\n"); CudaTest("posxl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(posyd, &posyl, sizeof(void*))) fprintf(stderr, "copying of posyl to device failed\n"); CudaTest("posyl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(poszd, &poszl, sizeof(void*))) fprintf(stderr, "copying of poszl to device failed\n"); CudaTest("poszl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(velxd, &velxl, sizeof(void*))) fprintf(stderr, "copying of velxl to device failed\n"); CudaTest("velxl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(velyd, &velyl, sizeof(void*))) fprintf(stderr, "copying of velyl to device failed\n"); CudaTest("velyl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(velzd, &velzl, sizeof(void*))) fprintf(stderr, "copying of velzl to device failed\n"); CudaTest("velzl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(accxd, &accxl, sizeof(void*))) fprintf(stderr, "copying of accxl to device failed\n"); CudaTest("accxl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(accyd, &accyl, sizeof(void*))) fprintf(stderr, "copying of accyl to device failed\n"); CudaTest("accyl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(acczd, &acczl, sizeof(void*))) fprintf(stderr, "copying of acczl to device failed\n"); CudaTest("acczl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(maxxd, &maxxl, sizeof(void*))) fprintf(stderr, "copying of maxxl to device failed\n"); CudaTest("maxxl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(maxyd, &maxyl, sizeof(void*))) fprintf(stderr, "copying of maxyl to device failed\n"); CudaTest("maxyl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(maxzd, &maxzl, sizeof(void*))) fprintf(stderr, "copying of maxzl to device failed\n"); CudaTest("maxzl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(minxd, &minxl, sizeof(void*))) fprintf(stderr, "copying of minxl to device failed\n"); CudaTest("minxl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(minyd, &minyl, sizeof(void*))) fprintf(stderr, "copying of minyl to device failed\n"); CudaTest("minyl copy to device failed");
if (cudaSuccess != cudaMemcpyToSymbol(minzd, &minzl, sizeof(void*))) fprintf(stderr, "copying of minzl to device failed\n"); CudaTest("minzl copy to device failed");
}
// Open system file
const char *filePath = "system16384.dat"; // note: unused; the input file is taken from argv[3] below
FILE *fh = fopen(argv[3], "r");
if (!fh) {
printf("E: Unable to open %s (read-only)\n", argv[3]);
exit(EXIT_FAILURE);
}
// Load particles from file
char line[256];
int tmp;
i = 0;
do {
// Read line
line[0] = '\0';
fgets(line, 256, fh);
// Grab body
tmp = sscanf(line, " %f %f %f %f %f %f %f ", &posx[i], &posy[i], &posz[i],
&velx[i], &vely[i], &velz[i], &mass[i]);
if (tmp != 7)
continue;
i++;
if (i >= nbodies) {
break;
}
} while (!feof(fh));
// generate input (disabled: bodies are read from the file above instead)
#if 0
drndset(7);
rsc = (3 * 3.1415926535897932384626433832795) / 16;
vsc = sqrt(1.0 / rsc);
for (i = 0; i < nbodies; i++) {
mass[i] = 1.0 / nbodies;
r = 1.0 / sqrt(pow(drnd()*0.999, -2.0/3.0) - 1);
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = rsc * r / sqrt(sq);
posx[i] = x * scale;
posy[i] = y * scale;
posz[i] = z * scale;
do {
x = drnd();
y = drnd() * 0.1;
} while (y > x*x * pow(1 - x*x, 3.5));
v = x * sqrt(2.0 / sqrt(1 + r*r));
do {
x = drnd()*2.0 - 1.0;
y = drnd()*2.0 - 1.0;
z = drnd()*2.0 - 1.0;
sq = x*x + y*y + z*z;
} while (sq > 1.0);
scale = vsc * v / sqrt(sq);
velx[i] = x * scale;
vely[i] = y * scale;
velz[i] = z * scale;
}
#endif
if (cudaSuccess != cudaMemcpy(massl, mass, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of mass to device failed\n"); CudaTest("mass copy to device failed");
if (cudaSuccess != cudaMemcpy(posxl, posx, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posx to device failed\n"); CudaTest("posx copy to device failed");
if (cudaSuccess != cudaMemcpy(posyl, posy, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posy to device failed\n"); CudaTest("posy copy to device failed");
if (cudaSuccess != cudaMemcpy(poszl, posz, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of posz to device failed\n"); CudaTest("posz copy to device failed");
if (cudaSuccess != cudaMemcpy(velxl, velx, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of velx to device failed\n"); CudaTest("velx copy to device failed");
if (cudaSuccess != cudaMemcpy(velyl, vely, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of vely to device failed\n"); CudaTest("vely copy to device failed");
if (cudaSuccess != cudaMemcpy(velzl, velz, sizeof(float) * nbodies, cudaMemcpyHostToDevice)) fprintf(stderr, "copying of velz to device failed\n"); CudaTest("velz copy to device failed");
// run timesteps (launch GPU kernels)
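// Each timestep runs the Barnes-Hut pipeline: compute the bounding box of all
// bodies, build the octree, summarize the cells (centers of mass), sort the
// bodies for memory locality, compute forces by traversing the tree, and
// integrate positions and velocities. Each kernel launch is timed with CUDA events.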
cudaEventCreate(&start); cudaEventCreate(&stop);
starttime = clock();
cudaEventRecord(start, 0);
InitializationKernel<<<1, 1>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[0] += time;
CudaTest("kernel 0 launch failed");
for (step = 0; step < timesteps; step++) {
cudaEventRecord(start, 0);
BoundingBoxKernel<<<blocks * FACTOR1, THREADS1>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[1] += time;
CudaTest("kernel 1 launch failed");
cudaEventRecord(start, 0);
TreeBuildingKernel<<<blocks * FACTOR2, THREADS2>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[2] += time;
CudaTest("kernel 2 launch failed");
cudaEventRecord(start, 0);
SummarizationKernel<<<blocks * FACTOR3, THREADS3>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[3] += time;
CudaTest("kernel 3 launch failed");
cudaEventRecord(start, 0);
SortKernel<<<blocks * FACTOR4, THREADS4>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[4] += time;
CudaTest("kernel 4 launch failed");
cudaEventRecord(start, 0);
ForceCalculationKernel<<<blocks * FACTOR5, THREADS5>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[5] += time;
CudaTest("kernel 5 launch failed");
cudaEventRecord(start, 0);
IntegrationKernel<<<blocks * FACTOR6, THREADS6>>>();
cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&time, start, stop);
timing[6] += time;
CudaTest("kernel 6 launch failed");
cudaEventRecord(start, 0);
if ((step % wb_period) == 0) {
// transfer result back to CPU
//fprintf(stderr, "Copying result back to CPU!\n");
if (cudaSuccess != cudaMemcpy(&error, errl, sizeof(int), cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of err from device failed\n"); CudaTest("err copy from device failed");
if (cudaSuccess != cudaMemcpy(posx, posxl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posx from device failed\n"); CudaTest("posx copy from device failed");
if (cudaSuccess != cudaMemcpy(posy, posyl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posy from device failed\n"); CudaTest("posy copy from device failed");
if (cudaSuccess != cudaMemcpy(posz, poszl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of posz from device failed\n"); CudaTest("posz copy from device failed");
if (cudaSuccess != cudaMemcpy(velx, velxl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of velx from device failed\n"); CudaTest("velx copy from device failed");
if (cudaSuccess != cudaMemcpy(vely, velyl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of vely from device failed\n"); CudaTest("vely copy from device failed");
if (cudaSuccess != cudaMemcpy(velz, velzl, sizeof(float) * nbodies, cudaMemcpyDeviceToHost)) fprintf(stderr, "copying of velz from device failed\n"); CudaTest("velz copy from device failed");
}
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
timing[7] += time;
}
endtime = clock();
CudaTest("kernel launch failed");
cudaEventDestroy(start); cudaEventDestroy(stop);
runtime = (int) (1000.0f * (endtime - starttime) / CLOCKS_PER_SEC);
fprintf(stderr, "runtime: %d ms (", runtime);
time = 0;
for (i = 1; i < 8; i++) {
fprintf(stderr, " %.1f ", timing[i]);
time += timing[i];
}
if (error == 0) {
fprintf(stderr, ") = %.1f\n", time);
} else {
fprintf(stderr, ") = %.1f FAILED %d\n", time, error);
}
if ((run == 0) || (mintime > runtime)) mintime = runtime;
}
fprintf(stderr, "mintime: %d ms\n", mintime);
// print output (the loop below is commented out, so only the body at the current
// value of i -- left over from the timing loop above -- is printed)
// for (i = 0; i < nbodies; i++) {
printf("%.2e %.2e %.2e\n", posx[i], posy[i], posz[i]);
// }
free(mass);
free(posx);
free(posy);
free(posz);
free(velx);
free(vely);
free(velz);
cudaFree(errl);
cudaFree(childl);
cudaFree(massl);
cudaFree(posxl);
cudaFree(posyl);
cudaFree(poszl);
cudaFree(countl);
cudaFree(startl);
cudaFree(maxxl);
cudaFree(maxyl);
cudaFree(maxzl);
cudaFree(minxl);
cudaFree(minyl);
cudaFree(minzl);
return 0;
}
|
796726a972a8741cb5e06ea571085030acdd5ccc.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* This sample is a templatized version of the template project.
* It also shows how to correctly templatize dynamically allocated shared
* memory arrays.
* Device code.
*/
#ifndef _TEMPLATE_KERNEL_H_
#define _TEMPLATE_KERNEL_H_
#include <stdio.h>
#include "sharedmem.cuh"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
template<class T>
__global__ void
testKernel( T* g_idata, T* g_odata)
{
// Shared mem size is determined by the host app at run time
SharedMemory<T> smem;
T* sdata = smem.getPointer();
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
const unsigned int num_threads = blockDim.x;
// read in input data from global memory
sdata[tid] = g_idata[tid];
__syncthreads();
// perform some computations
sdata[tid] = (T) num_threads * sdata[tid];
__syncthreads();
// write data to global memory
g_odata[tid] = sdata[tid];
}
#endif // #ifndef _TEMPLATE_KERNEL_H_
| 796726a972a8741cb5e06ea571085030acdd5ccc.cu | /*
* Copyright 1993-2010 NVIDIA Corporation. All rights reserved.
*
* Please refer to the NVIDIA end user license agreement (EULA) associated
* with this source code for terms and conditions that govern your use of
* this software. Any use, reproduction, disclosure, or distribution of
* this software and related documentation outside the terms of the EULA
* is strictly prohibited.
*
*/
/* This sample is a templatized version of the template project.
* It also shows how to correctly templatize dynamically allocated shared
* memory arrays.
* Device code.
*/
#ifndef _TEMPLATE_KERNEL_H_
#define _TEMPLATE_KERNEL_H_
#include <stdio.h>
#include "sharedmem.cuh"
////////////////////////////////////////////////////////////////////////////////
//! Simple test kernel for device functionality
//! @param g_idata input data in global memory
//! @param g_odata output data in global memory
////////////////////////////////////////////////////////////////////////////////
template<class T>
__global__ void
testKernel( T* g_idata, T* g_odata)
{
// Shared mem size is determined by the host app at run time
SharedMemory<T> smem;
T* sdata = smem.getPointer();
// access thread id
const unsigned int tid = threadIdx.x;
// access number of threads in this block
const unsigned int num_threads = blockDim.x;
// read in input data from global memory
sdata[tid] = g_idata[tid];
__syncthreads();
// perform some computations
sdata[tid] = (T) num_threads * sdata[tid];
__syncthreads();
// write data to global memory
g_odata[tid] = sdata[tid];
}
#endif // #ifndef _TEMPLATE_KERNEL_H_
|
ea2eebc50c8d1ae8a01a28cc8538bb8266ee2eac.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#define N 64
#define TPB 32
__device__ float scale(int i, int n)
{
return ((float) i) / (n - 1);
}
__device__ float distance(float x1, float x2)
{
return fabsf(x2 - x1);
}
__global__ void distanceKernel(float* d_out, float ref, float len)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const float x = scale(i, len);
d_out[i] = distance(x, ref);
printf("i = %2d: dist from %f to %f is %f./n", i, ref, x, d_out[i]);
}
int main()
{
const float ref = 0.5f;
float* d_out = 0;
hipMalloc(&d_out, N * sizeof(float));
hipLaunchKernelGGL(( distanceKernel) , dim3(N/TPB), dim3(TPB), 0, 0, d_out, ref, N);
hipFree(d_out);
return 0;
} | ea2eebc50c8d1ae8a01a28cc8538bb8266ee2eac.cu | #include <stdio.h>
#define N 64
#define TPB 32
__device__ float scale(int i, int n)
{
return ((float) i) / (n - 1);
}
__device__ float distance(float x1, float x2)
{
return fabsf(x2 - x1);
}
__global__ void distanceKernel(float* d_out, float ref, float len)
{
const int i = blockIdx.x * blockDim.x + threadIdx.x;
const float x = scale(i, len);
d_out[i] = distance(x, ref);
printf("i = %2d: dist from %f to %f is %f./n", i, ref, x, d_out[i]);
}
int main()
{
const float ref = 0.5f;
float* d_out = 0;
cudaMalloc(&d_out, N * sizeof(float));
distanceKernel <<<N/TPB, TPB>>>(d_out, ref, N);
cudaFree(d_out);
return 0;
} |
253a47f92d607ed6455800b44a26445c5379a32a.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <glog/logging.h>
#include "paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
namespace details {
template <typename T>
struct Add {
__device__ T operator()(const T &a, const T &b) const { return a + b; }
};
template <typename T>
struct Mul {
__device__ T operator()(const T &a, const T &b) const { return a * b; }
};
template <typename T>
struct Div {
__device__ T operator()(const T &a, const T &b) const { return a / b; }
};
template <typename T>
struct Sub {
__device__ T operator()(const T &a, const T &b) const { return a - b; }
};
template <typename T>
struct Pow {
__device__ T operator()(const T &a, const T &b) const {
return static_cast<T>(::powf(static_cast<float>(a), static_cast<float>(b)));
}
};
} // namespace details
template <typename T, typename Operator>
__global__ void elementwise_kernel(const size_t total,
const T *x_data,
const T *y_data,
T *out_data,
int pre,
int n,
int post,
Operator op) {
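// x is treated as a flattened [pre, n, post] tensor; y has length n and is
// broadcast across the pre and post extents via idx = (tid / post) % n.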
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < total) {
int idx = tid / post % n;
#if __CUDA_ARCH__ >= 350
out_data[tid] = op(__ldg(x_data + tid), __ldg(y_data + idx));
#else
out_data[tid] = op(x_data[tid], y_data[idx]);
#endif
}
}
nvinfer1::Dims ElementWisePlugin::getOutputDimensions(
int index, const nvinfer1::Dims *input_dims, int num_inputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index,
0,
platform::errors::InvalidArgument(
"There is only one output in TRT elementwise "
"op plugin, but got output index: %d.",
index));
PADDLE_ENFORCE_EQ(
num_inputs,
2,
platform::errors::InvalidArgument("There are 2 inputs in TRT elementwise "
"op plugin, but got input number: %d.",
num_inputs));
PADDLE_ENFORCE_NOT_NULL(
input_dims,
platform::errors::InvalidArgument(
"The input dims of TRT elementwise op plugin should not be null."));
return input_dims[0];
}
int ElementWisePlugin::initialize() TRT_NOEXCEPT {
axis_ = (axis_ == -1) ? dims_x_.nbDims - dims_y_.nbDims : axis_;
int trimed_nb_dims = dims_y_.nbDims;
for (; trimed_nb_dims > 0; --trimed_nb_dims) {
if (dims_y_.d[trimed_nb_dims - 1] != 1) {
break;
}
}
dims_y_.nbDims = trimed_nb_dims;
PADDLE_ENFORCE_GE(dims_x_.nbDims,
dims_y_.nbDims + axis_,
platform::errors::InvalidArgument(
"We expect [number of x dims] >= [number of y dims + "
"axis] in TRT elementwise op plugin, but got [number "
"of x dims] = %d, [number of y dims + axis] = %d.",
dims_x_.nbDims,
dims_y_.nbDims + axis_));
PADDLE_ENFORCE_LT(
axis_,
dims_x_.nbDims,
platform::errors::InvalidArgument("We expect [axis] < [number of x dims] "
"in TRT elementwise op plugin, but got "
"[axis] = %d, [number of x dims] = %d.",
axis_,
dims_x_.nbDims));
prev_size_ = 1;
midd_size_ = 1;
post_size_ = 1;
for (int i = 0; i < axis_; ++i) {
prev_size_ *= dims_x_.d[i];
}
for (int i = 0; i < dims_y_.nbDims; ++i) {
PADDLE_ENFORCE_EQ(dims_x_.d[i + axis_],
dims_y_.d[i],
platform::errors::InvalidArgument(
"Broadcast dimension mismatch. The dims of input Y "
"should be a subsequence of X."));
midd_size_ *= dims_y_.d[i];
}
for (int i = axis_ + dims_y_.nbDims; i < dims_x_.nbDims; ++i) {
post_size_ *= dims_x_.d[i];
}
return 0;
}
int ElementWisePlugin::enqueue(int batch_size,
const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs,
void *workspace,
#else
void *const *outputs,
void *workspace,
#endif
hipStream_t stream) TRT_NOEXCEPT {
const float *x = reinterpret_cast<const float *>(inputs[0]);
const float *y = reinterpret_cast<const float *>(inputs[1]);
float *out = reinterpret_cast<float *>(outputs[0]);
int num = batch_size * prev_size_ * midd_size_ * post_size_;
int thread = 256;
int block = (num + thread - 1) / thread;
if (type_ == "add") {
hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num,
x,
y,
out,
prev_size_,
batch_size * midd_size_,
post_size_,
details::Add<float>());
} else if (type_ == "mul") {
hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num,
x,
y,
out,
prev_size_,
batch_size * midd_size_,
post_size_,
details::Mul<float>());
} else if (type_ == "div") {
hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num,
x,
y,
out,
prev_size_,
batch_size * midd_size_,
post_size_,
details::Div<float>());
} else if (type_ == "sub") {
hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num,
x,
y,
out,
prev_size_,
batch_size * midd_size_,
post_size_,
details::Sub<float>());
} else if (type_ == "pow") {
hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream, num,
x,
y,
out,
prev_size_,
batch_size * midd_size_,
post_size_,
details::Pow<float>());
} else {
PADDLE_THROW(platform::errors::Fatal(
"The %s type elementwise is not implemented in trt plugin.", type_));
}
return hipGetLastError() != hipSuccess;
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
int ElementwisePluginDynamic::initialize() TRT_NOEXCEPT { return 0; }
size_t ElementwisePluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
return SerializedSize(type_.c_str()) + SerializedSize(axis_);
}
void ElementwisePluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, type_.c_str());
SerializeValue(&buffer, axis_);
}
nvinfer1::DimsExprs ElementwisePluginDynamic::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs *inputs,
int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
return inputs[0];
}
bool ElementwisePluginDynamic::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc *in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out,
platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos,
nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos,
nb_inputs + nb_outputs));
(in_out && pos < (nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType ElementwisePluginDynamic::getOutputDataType(
int index,
const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index,
0,
platform::errors::InvalidArgument(
"The Elementwise Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
return input_types[0];
}
int ElementwisePluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs,
void *const *outputs,
void *workspace,
hipStream_t stream) TRT_NOEXCEPT {
auto x_dims = input_desc[0].dims;
auto y_dims = input_desc[1].dims;
int axis = (axis_ == -1) ? x_dims.nbDims - y_dims.nbDims : axis_;
int batch_size = x_dims.d[0];
int prev_size = 1;
int midd_size = 1;
int post_size = 1;
for (int i = 0; i < axis; ++i) {
prev_size *= x_dims.d[i];
}
int trimed_nb_dims = y_dims.nbDims;
for (; trimed_nb_dims > 0; --trimed_nb_dims) {
if (y_dims.d[trimed_nb_dims - 1] != 1) {
break;
}
}
for (int i = 0; i < trimed_nb_dims; ++i) {
PADDLE_ENFORCE_EQ(x_dims.d[i + axis],
y_dims.d[i],
platform::errors::InvalidArgument(
"Broadcast dimension mismatch found in trt "
"elementwise plugin's x and y input."));
midd_size *= y_dims.d[i];
}
for (int i = axis + trimed_nb_dims; i < x_dims.nbDims; ++i) {
post_size *= x_dims.d[i];
}
const float *x = static_cast<const float *>(inputs[0]);
const float *y = static_cast<const float *>(inputs[1]);
float *out = static_cast<float *>(outputs[0]);
int num = prev_size * midd_size * post_size;
int thread = 256;
int block = (num + thread - 1) / thread;
if (type_ == "add") {
hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream,
num, x, y, out, prev_size, midd_size, post_size, details::Add<float>());
} else if (type_ == "mul") {
hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream,
num, x, y, out, prev_size, midd_size, post_size, details::Mul<float>());
} else if (type_ == "div") {
hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream,
num, x, y, out, prev_size, midd_size, post_size, details::Div<float>());
} else if (type_ == "sub") {
hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream,
num, x, y, out, prev_size, midd_size, post_size, details::Sub<float>());
} else if (type_ == "pow") {
hipLaunchKernelGGL(( elementwise_kernel), dim3(block), dim3(thread), 0, stream,
num, x, y, out, prev_size, midd_size, post_size, details::Pow<float>());
} else {
PADDLE_THROW(platform::errors::Unimplemented(
"Paddle-TRT only support elementwise "
"operation: {add, mul, div, sub, pow} currently, "
"but got %s.",
type_));
}
return hipGetLastError() != hipSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
| 253a47f92d607ed6455800b44a26445c5379a32a.cu | /* Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. */
#include <glog/logging.h>
#include "paddle/fluid/inference/tensorrt/plugin/elementwise_op_plugin.h"
namespace paddle {
namespace inference {
namespace tensorrt {
namespace plugin {
namespace details {
template <typename T>
struct Add {
__device__ T operator()(const T &a, const T &b) const { return a + b; }
};
template <typename T>
struct Mul {
__device__ T operator()(const T &a, const T &b) const { return a * b; }
};
template <typename T>
struct Div {
__device__ T operator()(const T &a, const T &b) const { return a / b; }
};
template <typename T>
struct Sub {
__device__ T operator()(const T &a, const T &b) const { return a - b; }
};
template <typename T>
struct Pow {
__device__ T operator()(const T &a, const T &b) const {
return static_cast<T>(::powf(static_cast<float>(a), static_cast<float>(b)));
}
};
} // namespace details
template <typename T, typename Operator>
__global__ void elementwise_kernel(const size_t total,
const T *x_data,
const T *y_data,
T *out_data,
int pre,
int n,
int post,
Operator op) {
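// x is treated as a flattened [pre, n, post] tensor; y has length n and is
// broadcast across the pre and post extents via idx = (tid / post) % n.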
int tid = blockIdx.x * blockDim.x + threadIdx.x;
if (tid < total) {
int idx = tid / post % n;
#if __CUDA_ARCH__ >= 350
out_data[tid] = op(__ldg(x_data + tid), __ldg(y_data + idx));
#else
out_data[tid] = op(x_data[tid], y_data[idx]);
#endif
}
}
nvinfer1::Dims ElementWisePlugin::getOutputDimensions(
int index, const nvinfer1::Dims *input_dims, int num_inputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index,
0,
platform::errors::InvalidArgument(
"There is only one output in TRT elementwise "
"op plugin, but got output index: %d.",
index));
PADDLE_ENFORCE_EQ(
num_inputs,
2,
platform::errors::InvalidArgument("There are 2 inputs in TRT elementwise "
"op plugin, but got input number: %d.",
num_inputs));
PADDLE_ENFORCE_NOT_NULL(
input_dims,
platform::errors::InvalidArgument(
"The input dims of TRT elementwise op plugin should not be null."));
return input_dims[0];
}
int ElementWisePlugin::initialize() TRT_NOEXCEPT {
axis_ = (axis_ == -1) ? dims_x_.nbDims - dims_y_.nbDims : axis_;
int trimed_nb_dims = dims_y_.nbDims;
for (; trimed_nb_dims > 0; --trimed_nb_dims) {
if (dims_y_.d[trimed_nb_dims - 1] != 1) {
break;
}
}
dims_y_.nbDims = trimed_nb_dims;
PADDLE_ENFORCE_GE(dims_x_.nbDims,
dims_y_.nbDims + axis_,
platform::errors::InvalidArgument(
"We expect [number of x dims] >= [number of y dims + "
"axis] in TRT elementwise op plugin, but got [number "
"of x dims] = %d, [number of y dims + axis] = %d.",
dims_x_.nbDims,
dims_y_.nbDims + axis_));
PADDLE_ENFORCE_LT(
axis_,
dims_x_.nbDims,
platform::errors::InvalidArgument("We expect [axis] < [number of x dims] "
"in TRT elementwise op plugin, but got "
"[axis] = %d, [number of x dims] = %d.",
axis_,
dims_x_.nbDims));
prev_size_ = 1;
midd_size_ = 1;
post_size_ = 1;
for (int i = 0; i < axis_; ++i) {
prev_size_ *= dims_x_.d[i];
}
for (int i = 0; i < dims_y_.nbDims; ++i) {
PADDLE_ENFORCE_EQ(dims_x_.d[i + axis_],
dims_y_.d[i],
platform::errors::InvalidArgument(
"Broadcast dimension mismatch. The dims of input Y "
"should be a subsequence of X."));
midd_size_ *= dims_y_.d[i];
}
for (int i = axis_ + dims_y_.nbDims; i < dims_x_.nbDims; ++i) {
post_size_ *= dims_x_.d[i];
}
return 0;
}
int ElementWisePlugin::enqueue(int batch_size,
const void *const *inputs,
#if IS_TRT_VERSION_LT(8000)
void **outputs,
void *workspace,
#else
void *const *outputs,
void *workspace,
#endif
cudaStream_t stream) TRT_NOEXCEPT {
const float *x = reinterpret_cast<const float *>(inputs[0]);
const float *y = reinterpret_cast<const float *>(inputs[1]);
float *out = reinterpret_cast<float *>(outputs[0]);
int num = batch_size * prev_size_ * midd_size_ * post_size_;
int thread = 256;
int block = (num + thread - 1) / thread;
if (type_ == "add") {
elementwise_kernel<<<block, thread, 0, stream>>>(num,
x,
y,
out,
prev_size_,
batch_size * midd_size_,
post_size_,
details::Add<float>());
} else if (type_ == "mul") {
elementwise_kernel<<<block, thread, 0, stream>>>(num,
x,
y,
out,
prev_size_,
batch_size * midd_size_,
post_size_,
details::Mul<float>());
} else if (type_ == "div") {
elementwise_kernel<<<block, thread, 0, stream>>>(num,
x,
y,
out,
prev_size_,
batch_size * midd_size_,
post_size_,
details::Div<float>());
} else if (type_ == "sub") {
elementwise_kernel<<<block, thread, 0, stream>>>(num,
x,
y,
out,
prev_size_,
batch_size * midd_size_,
post_size_,
details::Sub<float>());
} else if (type_ == "pow") {
elementwise_kernel<<<block, thread, 0, stream>>>(num,
x,
y,
out,
prev_size_,
batch_size * midd_size_,
post_size_,
details::Pow<float>());
} else {
PADDLE_THROW(platform::errors::Fatal(
"The %s type elementwise is not implemented in trt plugin.", type_));
}
return cudaGetLastError() != cudaSuccess;
}
// Dynamic Plugin below.
#if IS_TRT_VERSION_GE(6000)
int ElementwisePluginDynamic::initialize() TRT_NOEXCEPT { return 0; }
size_t ElementwisePluginDynamic::getSerializationSize() const TRT_NOEXCEPT {
return SerializedSize(type_.c_str()) + SerializedSize(axis_);
}
void ElementwisePluginDynamic::serialize(void *buffer) const TRT_NOEXCEPT {
SerializeValue(&buffer, type_.c_str());
SerializeValue(&buffer, axis_);
}
nvinfer1::DimsExprs ElementwisePluginDynamic::getOutputDimensions(
int output_index,
const nvinfer1::DimsExprs *inputs,
int nb_inputs,
nvinfer1::IExprBuilder &expr_builder) TRT_NOEXCEPT {
return inputs[0];
}
bool ElementwisePluginDynamic::supportsFormatCombination(
int pos,
const nvinfer1::PluginTensorDesc *in_out,
int nb_inputs,
int nb_outputs) TRT_NOEXCEPT {
PADDLE_ENFORCE_NOT_NULL(
in_out,
platform::errors::InvalidArgument(
"The input of swish plugin shoule not be nullptr."));
PADDLE_ENFORCE_LT(
pos,
nb_inputs + nb_outputs,
platform::errors::InvalidArgument("The pos(%d) should be less than the "
"num(%d) of the input and the output.",
pos,
nb_inputs + nb_outputs));
(in_out && pos < (nb_inputs + nb_outputs));
const nvinfer1::PluginTensorDesc &in = in_out[pos];
if (pos == 0) {
return (in.type == nvinfer1::DataType::kFLOAT) &&
(in.format == nvinfer1::TensorFormat::kLINEAR);
}
const nvinfer1::PluginTensorDesc &prev = in_out[pos - 1];
// output
return in.type == prev.type && in.format == prev.format;
}
nvinfer1::DataType ElementwisePluginDynamic::getOutputDataType(
int index,
const nvinfer1::DataType *input_types,
int nb_inputs) const TRT_NOEXCEPT {
PADDLE_ENFORCE_EQ(index,
0,
platform::errors::InvalidArgument(
"The Elementwise Plugin only has one input, so the "
"index value should be 0, but get %d.",
index));
return input_types[0];
}
int ElementwisePluginDynamic::enqueue(
const nvinfer1::PluginTensorDesc *input_desc,
const nvinfer1::PluginTensorDesc *output_desc,
const void *const *inputs,
void *const *outputs,
void *workspace,
cudaStream_t stream) TRT_NOEXCEPT {
auto x_dims = input_desc[0].dims;
auto y_dims = input_desc[1].dims;
int axis = (axis_ == -1) ? x_dims.nbDims - y_dims.nbDims : axis_;
int batch_size = x_dims.d[0];
int prev_size = 1;
int midd_size = 1;
int post_size = 1;
for (int i = 0; i < axis; ++i) {
prev_size *= x_dims.d[i];
}
int trimed_nb_dims = y_dims.nbDims;
for (; trimed_nb_dims > 0; --trimed_nb_dims) {
if (y_dims.d[trimed_nb_dims - 1] != 1) {
break;
}
}
for (int i = 0; i < trimed_nb_dims; ++i) {
PADDLE_ENFORCE_EQ(x_dims.d[i + axis],
y_dims.d[i],
platform::errors::InvalidArgument(
"Broadcast dimension mismatch found in trt "
"elementwise plugin's x and y input."));
midd_size *= y_dims.d[i];
}
for (int i = axis + trimed_nb_dims; i < x_dims.nbDims; ++i) {
post_size *= x_dims.d[i];
}
const float *x = static_cast<const float *>(inputs[0]);
const float *y = static_cast<const float *>(inputs[1]);
float *out = static_cast<float *>(outputs[0]);
int num = prev_size * midd_size * post_size;
int thread = 256;
int block = (num + thread - 1) / thread;
if (type_ == "add") {
elementwise_kernel<<<block, thread, 0, stream>>>(
num, x, y, out, prev_size, midd_size, post_size, details::Add<float>());
} else if (type_ == "mul") {
elementwise_kernel<<<block, thread, 0, stream>>>(
num, x, y, out, prev_size, midd_size, post_size, details::Mul<float>());
} else if (type_ == "div") {
elementwise_kernel<<<block, thread, 0, stream>>>(
num, x, y, out, prev_size, midd_size, post_size, details::Div<float>());
} else if (type_ == "sub") {
elementwise_kernel<<<block, thread, 0, stream>>>(
num, x, y, out, prev_size, midd_size, post_size, details::Sub<float>());
} else if (type_ == "pow") {
elementwise_kernel<<<block, thread, 0, stream>>>(
num, x, y, out, prev_size, midd_size, post_size, details::Pow<float>());
} else {
PADDLE_THROW(platform::errors::Unimplemented(
"Paddle-TRT only support elementwise "
"operation: {add, mul, div, sub, pow} currently, "
"but got %s.",
type_));
}
return cudaGetLastError() != cudaSuccess;
}
#endif
} // namespace plugin
} // namespace tensorrt
} // namespace inference
} // namespace paddle
|
38ab36036d649d89bf43c196ba33e922370ff252.hip | // !!! This is a file automatically generated by hipify!!!
/*
* ImageQuilting.cu
*
* Created on: 07-Oct-2015
* Author: mobin
*/
#include<iostream>
#include<cstdio>
#include<opencv2/core/core.hpp>
#include"opencv2/highgui/highgui.hpp"
#include<cuda_runtime.h>
#include<time.h>
#include <sys/time.h>
#include <unistd.h>
#include <hip/hip_runtime.h>
using std::cout;
using std::endl;
class ImageQuilting {
public:
void imageQuilting(cv::Mat input, cv::Mat output) {
}
};
| 38ab36036d649d89bf43c196ba33e922370ff252.cu | /*
* ImageQuilting.cu
*
* Created on: 07-Oct-2015
* Author: mobin
*/
#include<iostream>
#include<cstdio>
#include<opencv2/core/core.hpp>
#include"opencv2/highgui/highgui.hpp"
#include<cuda_runtime.h>
#include<time.h>
#include <sys/time.h>
#include <unistd.h>
#include <cuda.h>
using std::cout;
using std::endl;
class ImageQuilting {
public:
void imageQuilting(cv::Mat input, cv::Mat output) {
}
};
|
7086e5f37253aed79ba532a859dea63c598c6eba.hip | // !!! This is a file automatically generated by hipify!!!
//
// auto-generated by ops.py//
//header
#define OPS_API 2
#define OPS_2D
#define OPS_SOA
#include "ops_lib_cpp.h"
#include "ops_cuda_rt_support.h"
#include "ops_cuda_reduction.h"
#include <hip/hip_complex.h>
#ifdef OPS_MPI
#include "ops_mpi_core.h"
#endif
// global constants
void ops_init_backend() {}
void ops_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
{
printf("error: unknown const name\n"); exit(1);
}
}
//user kernel files
#include "multidim_kernel_cuda_kernel.cu"
#include "multidim_copy_kernel_cuda_kernel.cu"
#include "multidim_reduce_kernel_cuda_kernel.cu"
| 7086e5f37253aed79ba532a859dea63c598c6eba.cu | //
// auto-generated by ops.py//
//header
#define OPS_API 2
#define OPS_2D
#define OPS_SOA
#include "ops_lib_cpp.h"
#include "ops_cuda_rt_support.h"
#include "ops_cuda_reduction.h"
#include <cuComplex.h>
#ifdef OPS_MPI
#include "ops_mpi_core.h"
#endif
// global constants
void ops_init_backend() {}
void ops_decl_const_char(int dim, char const *type,
int size, char *dat, char const *name){
{
printf("error: unknown const name\n"); exit(1);
}
}
//user kernel files
#include "multidim_kernel_cuda_kernel.cu"
#include "multidim_copy_kernel_cuda_kernel.cu"
#include "multidim_reduce_kernel_cuda_kernel.cu"
|
d53653eb372f3b545d6a94828d8e2ee15ba4a69a.hip | // !!! This is a file automatically generated by hipify!!!
#include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cstring>
#include <string>
#include <cassert>
#include <iostream>
#include <fstream>
#include <hip/hip_runtime.h>
#include <rocblas.h>
#include "device_launch_parameters.h"
using namespace std;
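// Device kernels: copyBias tiles the M-element bias vector across all N output
// entries (O[tid] = Z[tid % M]); sineActivation applies the sine (SIREN-style)
// activation sin(weight * z) with a default weight of 30; fillCoordinateMatrixCUDA
// writes the (x, y) coordinate of every sample of the RESX x RESY grid into X.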
__global__ void copyBias(float *O, float *Z, int N, int M) {
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if(tid<N){
O[tid] = Z[tid%M];
}
}
__global__ void sineActivation(float *O, float *Z, int N, float weight=30.0) {
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if(tid<N){
O[tid] = sin(weight*Z[tid]);
}
}
__global__ void fillCoordinateMatrixCUDA(float* X, float start_x, float start_y, float diff_x, float diff_y, int RESX, int RESY){
int idx;
int tidx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tidy = (blockIdx.y * blockDim.y) + threadIdx.y;
if(tidx < RESX && tidy < RESY){
idx = 2*(tidx*RESY + tidy);
X[idx++] = start_x + tidx*(diff_x);
X[idx++] = start_y + tidy*(diff_y);
}
}
void readIntoArray(float* arr, ifstream* inFile, int SIZE){
if (inFile->is_open())
{
for (int i = 0; i < SIZE; i++)
{
*inFile >> arr[i];
}
inFile->close();
}
}
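// CPU reference version of fillCoordinateMatrixCUDA above; not called from main.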
void fillCoordinateMatrix(float* X, int STARTX, int STARTY, int ENDX, int ENDY, int RESX, int RESY, int HEIGHT, int WIDTH){
float start_x = STARTX/(HEIGHT-1.0);
start_x -= 0.5;
start_x *= 2.0;
float start_y = STARTY/(HEIGHT-1.0);
start_y -= 0.5;
start_y *= 2.0;
float diff_x = 2*((ENDX-STARTX)/(HEIGHT-1.0))/RESX;
float diff_y = 2*((ENDY-STARTY)/(HEIGHT-1.0))/RESY;
int idx=0;
float tmp = start_y;
for(int i=0;i<RESX;i++){
for(int j=0;j<RESY;j++){
X[idx++] = start_x;
X[idx++] = tmp;
tmp += diff_y;
}
start_x += diff_x;
tmp = start_y;
}
}
int main(int argc, char* argv[]){
int INP_DIM = 2;
int OUT_DIM = 3;
// ArgParse
int NUM_LAYERS, DIM, HEIGHT, RESX, RESY, STARTX, STARTY, ENDX, ENDY, PRINT_TIME;
NUM_LAYERS = atoi(argv[1]);
DIM = atoi(argv[2]);
HEIGHT = atoi(argv[3]);
RESX = atoi(argv[4]);
RESY = atoi(argv[5]);
STARTX = atoi(argv[6]);
STARTY = atoi(argv[7]);
ENDX = atoi(argv[8]);
ENDY = atoi(argv[9]);
PRINT_TIME = atoi(argv[10]);
float start_x = STARTX/(HEIGHT-1.0);
start_x -= 0.5;
start_x *= 2.0;
float start_y = STARTY/(HEIGHT-1.0);
start_y -= 0.5;
start_y *= 2.0;
float diff_x = 2*((ENDX-STARTX)/(HEIGHT-1.0))/RESX;
float diff_y = 2*((ENDY-STARTY)/(HEIGHT-1.0))/RESY;
ifstream inFile;
float* W;
float* B;
float* Z;
float* X;
int weightSize = DIM*DIM;
int biasSize = DIM;
int COORDS = RESX*RESY;
int outputSize = COORDS*DIM;
float alpha = 1.0f;
float beta = 1.0f;
int idx = 0;
int NUM_THREADS=1024;
int NUM_BLOCKS;
float time;
hipEvent_t start, stop;
hipblasHandle_t handle;
hipblasCreate(&handle);
hipMallocManaged(&Z, outputSize*sizeof(float));
hipMallocManaged(&W, weightSize*sizeof(float));
hipMallocManaged(&B, biasSize*sizeof(float));
hipMallocManaged(&X, COORDS*DIM*sizeof(float));
dim3 threads(32, 32);
dim3 blocks(ceil((float)RESX/32), ceil((float)RESY/32));
hipLaunchKernelGGL(( fillCoordinateMatrixCUDA), dim3(blocks), dim3(threads), 0, 0, X, start_x, start_y, diff_x, diff_y, RESX, RESY);
hipDeviceSynchronize();
hipEventCreate(&start);
hipEventCreate(&stop);
hipEventRecord(start, 0);
NUM_BLOCKS=ceil((float)(COORDS*DIM)/NUM_THREADS);
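// Forward pass through the hidden layers: for every layer, Z is first filled with
// the broadcast bias, the Sgemm call then computes Z = W * X + Z (beta = 1 keeps
// the bias), and the sine activation writes sin(30 * Z) back into X for the next layer.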
for(int layer=0;layer<NUM_LAYERS;layer++){
string weightsfileName = "weightsT/net."+to_string(layer)+".linear.weight";
string biasfileName = "weightsT/net."+to_string(layer)+".linear.bias";
inFile.open(weightsfileName.c_str());
if(layer == 0){
readIntoArray(W, &inFile, DIM*INP_DIM);
}
else{
readIntoArray(W, &inFile, weightSize);
}
inFile.open(biasfileName.c_str());
readIntoArray(B, &inFile, biasSize);
hipLaunchKernelGGL(( copyBias), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, Z, B, COORDS*biasSize, biasSize);
hipDeviceSynchronize();
if(layer == 0){
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, DIM, COORDS, INP_DIM, &alpha, W, DIM, X, INP_DIM,
&beta, Z, DIM);
}
else{
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, DIM, COORDS, DIM, &alpha, W, DIM, X, DIM,
&beta, Z, DIM);
}
hipDeviceSynchronize();
hipLaunchKernelGGL(( sineActivation), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, X, Z, COORDS*DIM);
hipDeviceSynchronize();
}
string weightsfileName = "weightsT/last_layer.linear.weight";
string biasfileName = "weightsT/last_layer.linear.bias";
inFile.open(weightsfileName.c_str());
readIntoArray(W, &inFile, DIM*OUT_DIM);
inFile.open(biasfileName.c_str());
readIntoArray(B, &inFile, OUT_DIM);
idx=0;
hipLaunchKernelGGL(( copyBias), dim3(NUM_BLOCKS), dim3(NUM_THREADS), 0, 0, Z, B, COORDS*OUT_DIM, OUT_DIM); // the output layer has OUT_DIM channels, so its bias repeats with period OUT_DIM
hipDeviceSynchronize();
hipblasSgemm(handle, HIPBLAS_OP_N, HIPBLAS_OP_N, OUT_DIM, COORDS, DIM, &alpha, W, OUT_DIM, X, DIM,
&beta, Z, OUT_DIM);
hipDeviceSynchronize();
hipEventRecord(stop, 0);
hipEventSynchronize(stop);
hipEventElapsedTime(&time, start, stop);
if(PRINT_TIME){
cout<<"Time Taken: "<<time/1000<<endl;
}
else{
idx = 0;
for(int i=0;i<COORDS;i++){
for(int j=0;j<OUT_DIM;j++){
cout<<Z[idx++]<<endl;
}
}
}
hipFree(W);
hipFree(Z);
hipFree(B);
hipFree(X);
}
| d53653eb372f3b545d6a94828d8e2ee15ba4a69a.cu | #include <cstdio>
#include <cstdlib>
#include <cmath>
#include <cstring>
#include <string>
#include <cassert>
#include <iostream>
#include <fstream>
#include <cuda_runtime.h>
#include <cublas_v2.h>
#include "device_launch_parameters.h"
using namespace std;
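// Device kernels: copyBias tiles the M-element bias vector across all N output
// entries (O[tid] = Z[tid % M]); sineActivation applies the sine (SIREN-style)
// activation sin(weight * z) with a default weight of 30; fillCoordinateMatrixCUDA
// writes the (x, y) coordinate of every sample of the RESX x RESY grid into X.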
__global__ void copyBias(float *O, float *Z, int N, int M) {
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if(tid<N){
O[tid] = Z[tid%M];
}
}
__global__ void sineActivation(float *O, float *Z, int N, float weight=30.0) {
int tid = (blockIdx.x * blockDim.x) + threadIdx.x;
if(tid<N){
O[tid] = sin(weight*Z[tid]);
}
}
__global__ void fillCoordinateMatrixCUDA(float* X, float start_x, float start_y, float diff_x, float diff_y, int RESX, int RESY){
int idx;
int tidx = (blockIdx.x * blockDim.x) + threadIdx.x;
int tidy = (blockIdx.y * blockDim.y) + threadIdx.y;
if(tidx < RESX && tidy < RESY){
idx = 2*(tidx*RESY + tidy);
X[idx++] = start_x + tidx*(diff_x);
X[idx++] = start_y + tidy*(diff_y);
}
}
void readIntoArray(float* arr, ifstream* inFile, int SIZE){
if (inFile->is_open())
{
for (int i = 0; i < SIZE; i++)
{
*inFile >> arr[i];
}
inFile->close();
}
}
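// CPU reference version of fillCoordinateMatrixCUDA above; not called from main.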
void fillCoordinateMatrix(float* X, int STARTX, int STARTY, int ENDX, int ENDY, int RESX, int RESY, int HEIGHT, int WIDTH){
float start_x = STARTX/(HEIGHT-1.0);
start_x -= 0.5;
start_x *= 2.0;
float start_y = STARTY/(HEIGHT-1.0);
start_y -= 0.5;
start_y *= 2.0;
float diff_x = 2*((ENDX-STARTX)/(HEIGHT-1.0))/RESX;
float diff_y = 2*((ENDY-STARTY)/(HEIGHT-1.0))/RESY;
int idx=0;
float tmp = start_y;
for(int i=0;i<RESX;i++){
for(int j=0;j<RESY;j++){
X[idx++] = start_x;
X[idx++] = tmp;
tmp += diff_y;
}
start_x += diff_x;
tmp = start_y;
}
}
int main(int argc, char* argv[]){
int INP_DIM = 2;
int OUT_DIM = 3;
// ArgParse
int NUM_LAYERS, DIM, HEIGHT, RESX, RESY, STARTX, STARTY, ENDX, ENDY, PRINT_TIME;
NUM_LAYERS = atoi(argv[1]);
DIM = atoi(argv[2]);
HEIGHT = atoi(argv[3]);
RESX = atoi(argv[4]);
RESY = atoi(argv[5]);
STARTX = atoi(argv[6]);
STARTY = atoi(argv[7]);
ENDX = atoi(argv[8]);
ENDY = atoi(argv[9]);
PRINT_TIME = atoi(argv[10]);
float start_x = STARTX/(HEIGHT-1.0);
start_x -= 0.5;
start_x *= 2.0;
float start_y = STARTY/(HEIGHT-1.0);
start_y -= 0.5;
start_y *= 2.0;
float diff_x = 2*((ENDX-STARTX)/(HEIGHT-1.0))/RESX;
float diff_y = 2*((ENDY-STARTY)/(HEIGHT-1.0))/RESY;
ifstream inFile;
float* W;
float* B;
float* Z;
float* X;
int weightSize = DIM*DIM;
int biasSize = DIM;
int COORDS = RESX*RESY;
int outputSize = COORDS*DIM;
float alpha = 1.0f;
float beta = 1.0f;
int idx = 0;
int NUM_THREADS=1024;
int NUM_BLOCKS;
float time;
cudaEvent_t start, stop;
cublasHandle_t handle;
cublasCreate(&handle);
cudaMallocManaged(&Z, outputSize*sizeof(float));
cudaMallocManaged(&W, weightSize*sizeof(float));
cudaMallocManaged(&B, biasSize*sizeof(float));
cudaMallocManaged(&X, COORDS*DIM*sizeof(float));
dim3 threads(32, 32);
dim3 blocks(ceil((float)RESX/32), ceil((float)RESY/32));
fillCoordinateMatrixCUDA<<<blocks, threads>>>(X, start_x, start_y, diff_x, diff_y, RESX, RESY);
cudaDeviceSynchronize();
cudaEventCreate(&start);
cudaEventCreate(&stop);
cudaEventRecord(start, 0);
NUM_BLOCKS=ceil((float)(COORDS*DIM)/NUM_THREADS);
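// Forward pass through the hidden layers: for every layer, Z is first filled with
// the broadcast bias, the Sgemm call then computes Z = W * X + Z (beta = 1 keeps
// the bias), and the sine activation writes sin(30 * Z) back into X for the next layer.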
for(int layer=0;layer<NUM_LAYERS;layer++){
string weightsfileName = "weightsT/net."+to_string(layer)+".linear.weight";
string biasfileName = "weightsT/net."+to_string(layer)+".linear.bias";
inFile.open(weightsfileName.c_str());
if(layer == 0){
readIntoArray(W, &inFile, DIM*INP_DIM);
}
else{
readIntoArray(W, &inFile, weightSize);
}
inFile.open(biasfileName.c_str());
readIntoArray(B, &inFile, biasSize);
copyBias<<<NUM_BLOCKS, NUM_THREADS>>>(Z, B, COORDS*biasSize, biasSize);
cudaDeviceSynchronize();
if(layer == 0){
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, DIM, COORDS, INP_DIM, &alpha, W, DIM, X, INP_DIM,
&beta, Z, DIM);
}
else{
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, DIM, COORDS, DIM, &alpha, W, DIM, X, DIM,
&beta, Z, DIM);
}
cudaDeviceSynchronize();
sineActivation<<<NUM_BLOCKS, NUM_THREADS>>>(X, Z, COORDS*DIM);
cudaDeviceSynchronize();
}
string weightsfileName = "weightsT/last_layer.linear.weight";
string biasfileName = "weightsT/last_layer.linear.bias";
inFile.open(weightsfileName.c_str());
readIntoArray(W, &inFile, DIM*OUT_DIM);
inFile.open(biasfileName.c_str());
readIntoArray(B, &inFile, OUT_DIM);
idx=0;
copyBias<<<NUM_BLOCKS, NUM_THREADS>>>(Z, B, COORDS*OUT_DIM, OUT_DIM); // the output layer has OUT_DIM channels, so its bias repeats with period OUT_DIM
cudaDeviceSynchronize();
cublasSgemm(handle, CUBLAS_OP_N, CUBLAS_OP_N, OUT_DIM, COORDS, DIM, &alpha, W, OUT_DIM, X, DIM,
&beta, Z, OUT_DIM);
cudaDeviceSynchronize();
cudaEventRecord(stop, 0);
cudaEventSynchronize(stop);
cudaEventElapsedTime(&time, start, stop);
if(PRINT_TIME){
cout<<"Time Taken: "<<time/1000<<endl;
}
else{
idx = 0;
for(int i=0;i<COORDS;i++){
for(int j=0;j<OUT_DIM;j++){
cout<<Z[idx++]<<endl;
}
}
}
cudaFree(W);
cudaFree(Z);
cudaFree(B);
cudaFree(X);
}
|
cd3d1e21221715d399c2d1ea052518d3fff71dff.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 19.04.2018
// @author [email protected]
//
#include <system/op_boilerplate.h>
#include <ops/declarable/helpers/activations.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__global__ void preluCuda(const void *vx, const Nd4jLong *xShapeInfo,
const void *vy, const Nd4jLong *yShapeInfo,
void *vz) {
const auto x = reinterpret_cast<const X*>(vx);
const auto y = reinterpret_cast<const Y*>(vy);
auto z = reinterpret_cast<X*>(vz);
__shared__ Nd4jLong xzLen;
__shared__ int xzRank, yRank;
if (threadIdx.x == 0) {
xzLen = shape::length(xShapeInfo);
xzRank = shape::rank(xShapeInfo);
yRank = shape::rank(yShapeInfo);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int coords[MAX_RANK];
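// PReLU: negative inputs are scaled by the matching alpha value; alpha is broadcast
// over any unit extents of its shape (those coordinates are zeroed before the lookup).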
for (int i = tid; i < xzLen; i += blockDim.x * gridDim.x) {
shape::index2coords(i, xShapeInfo, coords);
const auto xzOffset = shape::getOffset(xShapeInfo, coords);
const auto xVal = x[xzOffset];
if(xVal < 0) {
for (uint j = 0; j < yRank; ++j)
if(yShapeInfo[j + 1] == 1)
coords[j + 1] = 0;
z[xzOffset] = xVal * y[shape::getOffset(yShapeInfo, coords + 1)];
}
else
z[xzOffset] = xVal;
}
}
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
linkage void preluCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz) {
hipLaunchKernelGGL(( preluCuda<X, Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xShapeInfo, vy, yShapeInfo, vz);
}
///////////////////////////////////////////////////////////////////
void prelu(sd::LaunchContext * context, const NDArray& input, const NDArray& alpha, NDArray& output) {
PointersManager manager(context, "prelu");
const int threadsPerBlock = 256;
const int blocksPerGrid = 512;
const int sharedMem = 512;
const auto xType = input.dataType();
const auto yType = alpha.dataType();
NDArray::prepareSpecialUse({&output}, {&input, &alpha});
BUILD_SINGLE_SELECTOR_TWICE(xType, preluCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), alpha.getSpecialBuffer(), alpha.getSpecialShapeInfo(), output.getSpecialBuffer()), FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input, &alpha});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__global__ linkage void preluBPCuda(const void *vIn, const Nd4jLong *inShapeInfo,
const void *vAlpha, const Nd4jLong *alphaShapeInfo,
const void *vdLdO, const Nd4jLong *dLdOShapeInfo,
void *vdLdI, const Nd4jLong *dLdIShapeInfo,
void *vdLdA, const Nd4jLong *dLdAShapeInfo) {
const auto in = reinterpret_cast<const X*>(vIn);
const auto alpha = reinterpret_cast<const Y*>(vAlpha);
const auto dLdO = reinterpret_cast<const Y*>(vdLdO);
auto dLdI = reinterpret_cast<Y*>(vdLdI);
auto dLdA = reinterpret_cast<Y*>(vdLdA);
__shared__ Nd4jLong inLen, totalThreads;
__shared__ int inRank, alphaRank;
if (threadIdx.x == 0) {
inLen = shape::length(inShapeInfo);
totalThreads = gridDim.x * blockDim.x;
inRank = shape::rank(inShapeInfo);
alphaRank = shape::rank(alphaShapeInfo);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int coords[MAX_RANK];
for (int i = tid; i < inLen; i += totalThreads) {
shape::index2coords(i, inShapeInfo, coords);
const auto inOffset = shape::getOffset(inShapeInfo, coords);
const auto dLdOOffset = shape::getOffset(dLdOShapeInfo, coords);
const auto dLdIOffset = shape::getOffset(dLdIShapeInfo, coords);
const auto xVal = in[inOffset];
const auto grO = dLdO[dLdOOffset];
if(xVal < 0) {
for (uint j = 0; j < alphaRank; ++j)
if(alphaShapeInfo[j + 1] == 1)
coords[j + 1] = 0;
const auto alphaOffset = shape::getOffset(alphaShapeInfo, coords + 1);
const auto dLdAOffset = shape::getOffset(dLdAShapeInfo, coords + 1);
dLdI[dLdIOffset] = grO * alpha[alphaOffset];
sd::math::atomics::nd4j_atomicAdd<Y>(&dLdA[dLdAOffset], static_cast<Y>(grO * xVal));
}
else
dLdI[dLdIOffset] = grO;
}
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__host__ linkage void preluBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream, const void *vIn, const Nd4jLong *inShapeInfo, const void *vAlpha, const Nd4jLong *alphaShapeInfo, const void *vdLdO, const Nd4jLong *dLdOShapeInfo, void *vdLdI, const Nd4jLong *dLdIShapeInfo, void *vdLdA, const Nd4jLong *dLdAShapeInfo) {
hipLaunchKernelGGL(( preluBPCuda<X, Y>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vIn, inShapeInfo, vAlpha, alphaShapeInfo, vdLdO, dLdOShapeInfo, vdLdI, dLdIShapeInfo, vdLdA, dLdAShapeInfo);
}
//////////////////////////////////////////////////////////////////////////
void preluBP(sd::LaunchContext* context, const NDArray& input, const NDArray& alpha, const NDArray& dLdO, NDArray& dLdI, NDArray& dLdA) {
dLdA.nullify();
PointersManager manager(context, "preluBP");
const int threadsPerBlock = 256;
const int blocksPerGrid = 512;
const int sharedMem = 512;
const auto xType = input.dataType();
const auto zType = alpha.dataType();
NDArray::prepareSpecialUse({&dLdI, &dLdA}, {&input, &alpha, &dLdO});
BUILD_SINGLE_SELECTOR_TWICE(xType, preluBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), alpha.getSpecialBuffer(), alpha.getSpecialShapeInfo(), dLdO.getSpecialBuffer(), dLdO.getSpecialShapeInfo(), dLdI.getSpecialBuffer(), dLdI.getSpecialShapeInfo(), dLdA.getSpecialBuffer(), dLdA.getSpecialShapeInfo()), FLOAT_TYPES);
NDArray::registerSpecialUse({&dLdI, &dLdA}, {&input, &alpha, &dLdO});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__device__ void softMaxForVectorCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) {
// logic of this kernel is based on assumption gridDim = 1
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len;
__shared__ int numOfIters;
__shared__ T* shmem;
if (threadIdx.x == 0) {
extern __shared__ char shared[];
shmem = reinterpret_cast<T*>(shared);
len = shape::length(xShapeInfo);
numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x)
}
__syncthreads();
T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ??
// ************ evaluate max element in input array x ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong xOffset = shape::getIndexOffset(elemIdx, xShapeInfo);
shmem[threadIdx.x] = (threadIdx.x != 0) ? x[xOffset] : sd::math::nd4j_max<T>(x[xOffset], temp); // take into account max element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ??
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] = sd::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
__syncthreads();
}
temp = shmem[0]; // save max value calculated at current iteration
}
const T max = temp;
temp = 0;
// ************ evaluate value of exp(x[offset] - max) per each element, store it to shared memory shmem ************ //
// at the same evaluate sum of exponents, sum will be stored in shmem[0]
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong xOffset = shape::getIndexOffset(elemIdx, xShapeInfo);
const Nd4jLong zOffset = shape::getIndexOffset(elemIdx, zShapeInfo);
z[zOffset] = sd::math::nd4j_exp<T, T>(x[xOffset] - max);
shmem[threadIdx.x] = (threadIdx.x != 0) ? z[zOffset] : (z[zOffset] + temp); // take into account sum element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = 0;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] += shmem[threadIdx.x + s];
__syncthreads();
}
temp = shmem[0]; // save sum calculated at current iteration
}
// ************ evaluate z[offset] / sum ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx >= len) continue;
const Nd4jLong zOffset = shape::getIndexOffset(elemIdx, zShapeInfo);
z[zOffset] /= shmem[0];
}
}
template<typename T>
__global__ void softMaxForVectorCudaGlobal(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) {
softMaxForVectorCuda<T>(vx, xShapeInfo, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
template <typename T>
linkage void softMaxForVectorCudaLauncher(const hipStream_t* stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) {
hipLaunchKernelGGL(( softMaxForVectorCudaGlobal<T>), dim3(1), dim3(MAX_NUM_THREADS / 4) , (MAX_NUM_THREADS / 4) * sizeof(T) + 512, *stream, vx, xShapeInfo, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void softMaxCuda(const void* vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xOffsets,
void* vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zOffsets) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
const auto* xTad = x + xOffsets[blockIdx.x];
auto* zTad = z + zOffsets[blockIdx.x];
softMaxForVectorCuda<T>(xTad, xTadShapeInfo, zTad, zTadShapeInfo);
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void softMaxCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const hipStream_t *stream,
const void* vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xOffsets,
void* vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zOffsets) {
hipLaunchKernelGGL(( softMaxCuda<T>), dim3(blocksPerGrid), dim3(threadsPerBlock), sharedMem, *stream, vx, xTadShapeInfo, xOffsets, vz, zTadShapeInfo, zOffsets);
}
//////////////////////////////////////////////////////////////////////////
void softmax(sd::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
const int rank = input.rankOf();
PointersManager manager(context, "helpers::softmax");
if(input.isVector()) {
if(rank == 1 || input.sizeAt(dimension) != 1) {
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), softMaxForVectorCudaLauncher, (context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo()), FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
}
else
output = 1.;
}
else {
auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), {dimension});
auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output.getShapeInfo(), {dimension});
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = packZ.numberOfTads();
const int sharedMem = input.sizeOfT() * threadsPerBlock + 512;
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), softMaxCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), packX.specialShapeInfo(), packX.specialOffsets(), output.specialBuffer(), packZ.specialShapeInfo(), packZ.specialOffsets()), FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
// auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDimension(reduce::Max, {dimension}, true);
// (input - maxAlongDim).applyTransform(transform::Exp, &output); // output contains exponents temporarily
// auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true);
// output /= sumAlongDim;
// input.tickReadDevice();
}
manager.synchronize();
output.tickWriteDevice();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ void logSoftMaxForVectorCuda(const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
// logic of this kernel is based on assumption gridDim = 1
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len;
__shared__ int numOfIters;
__shared__ T* shmem;
if (threadIdx.x == 0) {
extern __shared__ char shared[];
shmem = reinterpret_cast<T*>(shared);
len = shape::length(xzShapeInfo);
numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x)
}
__syncthreads();
T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ??
// ************ evaluate max element in input array x ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
shmem[threadIdx.x] = (threadIdx.x != 0) ? x[offset] : sd::math::nd4j_max<T>(x[offset], temp); // take into account max element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ??
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] = sd::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
__syncthreads();
}
temp = shmem[0]; // save max value calculated at current iteration
}
const T max = temp;
temp = 0;
// ************ evaluate value of exp(x[offset] - max) per each element, store it to shared memory shmem ************ //
// at the same time evaluate sum of exponents, sum will be stored in shmem[0]
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
z[offset] = sd::math::nd4j_exp<T, T>(x[offset] - max);
shmem[threadIdx.x] = (threadIdx.x != 0) ? z[offset] : (z[offset] + temp); // take into account sum element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = 0;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] += shmem[threadIdx.x + s];
__syncthreads();
}
temp = shmem[0]; // save sum calculated at current iteration
}
// ************ evaluate log(z[offset] / sum) ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx >= len) continue;
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
z[offset] = sd::math::nd4j_log<T,T>(z[offset] / shmem[0]);
}
}
///////////////////////////////////////////////////////////////////
template <typename T>
linkage void logSoftMaxForVectorCudaLauncher(const hipStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
hipLaunchKernelGGL(( logSoftMaxForVectorCuda<T>), dim3(1), dim3(MAX_NUM_THREADS), MAX_NUM_THREADS * sizeof(T) + 512, *stream, vx, xzShapeInfo, vz);
}
//////////////////////////////////////////////////////////////////////////
void logSoftmax(sd::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
const int rank = input.rankOf();
if(input.isVector()) {
if(rank == 1 || input.sizeAt(dimension) != 1) {
BUILD_SINGLE_SELECTOR(input.dataType(), logSoftMaxForVectorCudaLauncher, (context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer()), FLOAT_TYPES);
input.tickReadDevice();
}
else
output = 0.;
}
else {
auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDimension(reduce::Max, {dimension}, true);
(input - maxAlongDim).applyTransform(transform::Exp, output); // output contains exponents temporarily
auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true);
output /= sumAlongDim;
output.applyTransform(transform::Log, output);
input.tickReadDevice();
}
PointersManager manager(context, "helpers::logSoftmax");
manager.synchronize();
output.tickWriteDevice();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ linkage void softMaxDerivForVectorCuda(const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
// logic of this kernel is based on assumption gridDim = 1
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len;
__shared__ int numOfIters;
__shared__ T* shmem;
if (threadIdx.x == 0) {
extern __shared__ char shared[];
shmem = reinterpret_cast<T*>(shared);
len = shape::length(xzShapeInfo);
numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x)
}
__syncthreads();
T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ??
// ************ evaluate max element in input array x ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
shmem[threadIdx.x] = (threadIdx.x != 0) ? x[offset] : sd::math::nd4j_max<T>(x[offset], temp); // take into account max element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ??
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] = sd::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
__syncthreads();
}
temp = shmem[0]; // save max value calculated at current iteration
}
const T max = temp;
temp = 0;
// ************ evaluate value of exp(x[offset] - max) per each element, store it to shared memory shmem ************ //
// at the same evaluate sum of exponents, sum will be stored in shmem[0]
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
z[offset] = sd::math::nd4j_exp<T, T>(x[offset] - max);
shmem[threadIdx.x] = (threadIdx.x != 0) ? z[offset] : (z[offset] + temp); // take into account sum element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = 0;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] += shmem[threadIdx.x + s];
__syncthreads();
}
temp = shmem[0]; // save sum calculated at current iteration
}
// ************ evaluate (z[offset] / sum) and derivative z[offset] = z[offset] * (1 - z[offset]) ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx >= len) continue;
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
z[offset] /= shmem[0];
z[offset] *= (1.f - z[offset]); // derivative
}
}
///////////////////////////////////////////////////////////////////
template <typename T>
linkage void softMaxDerivForVectorCudaLauncher(const hipStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
hipLaunchKernelGGL(( softMaxDerivForVectorCuda<T>), dim3(1), dim3(MAX_NUM_THREADS), MAX_NUM_THREADS * sizeof(T) + 512, *stream, vx, xzShapeInfo, vz);
}
///////////////////////////////////////////////////////////////////
void softmaxDerivative(sd::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
const int rank = input.rankOf();
int temp;
if(shape::isCommonVector(input.getShapeInfo(), temp)) {
BUILD_SINGLE_SELECTOR(input.dataType(), softMaxDerivForVectorCudaLauncher, (context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer()), FLOAT_TYPES);
input.tickReadDevice();
}
else {
auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDimension(reduce::Max, {dimension}, true);
(input - maxAlongDim).applyTransform(transform::Exp, output); // output contains exponents temporarily
auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true);
output /= sumAlongDim;
output *= (1.f - output); // derivative
input.tickReadDevice();
}
PointersManager manager(context, "helpers::softmaxDerivative");
manager.synchronize();
output.tickWriteDevice();
}
template <typename T>
linkage void thresholdRelu_(NDArray const& input, double threshold, NDArray& output) {
auto routine = LAMBDA_T(_x, threshold) {
return _x > (T)threshold ? _x: (T)0.f;
};
const_cast<NDArray&>(input).applyLambda(routine, output);
}
void thresholdRelu(sd::LaunchContext * context, NDArray const& input, double threshold, NDArray& output) {
BUILD_SINGLE_SELECTOR(input.dataType(), thresholdRelu_, (input, threshold, output), FLOAT_TYPES);
}
template <typename T>
linkage void thresholdReluDerivative_(NDArray* input, double theta, NDArray* dLdO, NDArray* output) {
auto derivative = LAMBDA_TT(_x, grO, theta) {if (_x > theta) return grO; else return static_cast<T>(0); };
input->applyPairwiseLambda(*dLdO, derivative, *output);
}
void thresholdReluDerivative(sd::LaunchContext * context, NDArray* input, double threshold, NDArray* dLdO, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), thresholdReluDerivative_, (input, threshold, dLdO, output), FLOAT_TYPES);
}
}
}
}
| cd3d1e21221715d399c2d1ea052518d3fff71dff.cu | /*******************************************************************************
* Copyright (c) 2015-2018 Skymind, Inc.
*
* This program and the accompanying materials are made available under the
* terms of the Apache License, Version 2.0 which is available at
* https://www.apache.org/licenses/LICENSE-2.0.
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations
* under the License.
*
* SPDX-License-Identifier: Apache-2.0
******************************************************************************/
//
// @author Yurii Shyrma ([email protected]), created on 19.04.2018
// @author [email protected]
//
#include <system/op_boilerplate.h>
#include <ops/declarable/helpers/activations.h>
#include <helpers/ShapeUtils.h>
#include <numeric>
#include <helpers/PointersManager.h>
#include <helpers/ConstantTadHelper.h>
namespace sd {
namespace ops {
namespace helpers {
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__global__ void preluCuda(const void *vx, const Nd4jLong *xShapeInfo,
const void *vy, const Nd4jLong *yShapeInfo,
void *vz) {
const auto x = reinterpret_cast<const X*>(vx);
const auto y = reinterpret_cast<const Y*>(vy);
auto z = reinterpret_cast<X*>(vz);
__shared__ Nd4jLong xzLen;
__shared__ int xzRank, yRank;
if (threadIdx.x == 0) {
xzLen = shape::length(xShapeInfo);
xzRank = shape::rank(xShapeInfo);
yRank = shape::rank(yShapeInfo);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int coords[MAX_RANK];
for (int i = tid; i < xzLen; i += blockDim.x * gridDim.x) {
shape::index2coords(i, xShapeInfo, coords);
const auto xzOffset = shape::getOffset(xShapeInfo, coords);
const auto xVal = x[xzOffset];
if(xVal < 0) {
for (uint j = 0; j < yRank; ++j)
if(yShapeInfo[j + 1] == 1)
coords[j + 1] = 0;
z[xzOffset] = xVal * y[shape::getOffset(yShapeInfo, coords + 1)];
}
else
z[xzOffset] = xVal;
}
}
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
linkage void preluCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void *vx, const Nd4jLong *xShapeInfo, const void *vy, const Nd4jLong *yShapeInfo, void *vz) {
preluCuda<X, Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xShapeInfo, vy, yShapeInfo, vz);
}
///////////////////////////////////////////////////////////////////
void prelu(sd::LaunchContext * context, const NDArray& input, const NDArray& alpha, NDArray& output) {
PointersManager manager(context, "prelu");
const int threadsPerBlock = 256;
const int blocksPerGrid = 512;
const int sharedMem = 512;
const auto xType = input.dataType();
const auto yType = alpha.dataType();
NDArray::prepareSpecialUse({&output}, {&input, &alpha});
BUILD_SINGLE_SELECTOR_TWICE(xType, preluCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), alpha.getSpecialBuffer(), alpha.getSpecialShapeInfo(), output.getSpecialBuffer()), FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input, &alpha});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__global__ linkage void preluBPCuda(const void *vIn, const Nd4jLong *inShapeInfo,
const void *vAlpha, const Nd4jLong *alphaShapeInfo,
const void *vdLdO, const Nd4jLong *dLdOShapeInfo,
void *vdLdI, const Nd4jLong *dLdIShapeInfo,
void *vdLdA, const Nd4jLong *dLdAShapeInfo) {
const auto in = reinterpret_cast<const X*>(vIn);
const auto alpha = reinterpret_cast<const Y*>(vAlpha);
const auto dLdO = reinterpret_cast<const Y*>(vdLdO);
auto dLdI = reinterpret_cast<Y*>(vdLdI);
auto dLdA = reinterpret_cast<Y*>(vdLdA);
__shared__ Nd4jLong inLen, totalThreads;
__shared__ int inRank, alphaRank;
if (threadIdx.x == 0) {
inLen = shape::length(inShapeInfo);
totalThreads = gridDim.x * blockDim.x;
inRank = shape::rank(inShapeInfo);
alphaRank = shape::rank(alphaShapeInfo);
}
__syncthreads();
const auto tid = blockIdx.x * blockDim.x + threadIdx.x;
int coords[MAX_RANK];
for (int i = tid; i < inLen; i += totalThreads) {
shape::index2coords(i, inShapeInfo, coords);
const auto inOffset = shape::getOffset(inShapeInfo, coords);
const auto dLdOOffset = shape::getOffset(dLdOShapeInfo, coords);
const auto dLdIOffset = shape::getOffset(dLdIShapeInfo, coords);
const auto xVal = in[inOffset];
const auto grO = dLdO[dLdOOffset];
if(xVal < 0) {
for (uint j = 0; j < alphaRank; ++j)
if(alphaShapeInfo[j + 1] == 1)
coords[j + 1] = 0;
const auto alphaOffset = shape::getOffset(alphaShapeInfo, coords + 1);
const auto dLdAOffset = shape::getOffset(dLdAShapeInfo, coords + 1);
dLdI[dLdIOffset] = grO * alpha[alphaOffset];
sd::math::atomics::nd4j_atomicAdd<Y>(&dLdA[dLdAOffset], static_cast<Y>(grO * xVal));
}
else
dLdI[dLdIOffset] = grO;
}
}
//////////////////////////////////////////////////////////////////////////
template<typename X, typename Y>
__host__ linkage void preluBPCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream, const void *vIn, const Nd4jLong *inShapeInfo, const void *vAlpha, const Nd4jLong *alphaShapeInfo, const void *vdLdO, const Nd4jLong *dLdOShapeInfo, void *vdLdI, const Nd4jLong *dLdIShapeInfo, void *vdLdA, const Nd4jLong *dLdAShapeInfo) {
preluBPCuda<X, Y><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vIn, inShapeInfo, vAlpha, alphaShapeInfo, vdLdO, dLdOShapeInfo, vdLdI, dLdIShapeInfo, vdLdA, dLdAShapeInfo);
}
//////////////////////////////////////////////////////////////////////////
void preluBP(sd::LaunchContext* context, const NDArray& input, const NDArray& alpha, const NDArray& dLdO, NDArray& dLdI, NDArray& dLdA) {
dLdA.nullify();
PointersManager manager(context, "preluBP");
const int threadsPerBlock = 256;
const int blocksPerGrid = 512;
const int sharedMem = 512;
const auto xType = input.dataType();
const auto zType = alpha.dataType();
NDArray::prepareSpecialUse({&dLdI, &dLdA}, {&input, &alpha, &dLdO});
BUILD_SINGLE_SELECTOR_TWICE(xType, preluBPCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), alpha.getSpecialBuffer(), alpha.getSpecialShapeInfo(), dLdO.getSpecialBuffer(), dLdO.getSpecialShapeInfo(), dLdI.getSpecialBuffer(), dLdI.getSpecialShapeInfo(), dLdA.getSpecialBuffer(), dLdA.getSpecialShapeInfo()), FLOAT_TYPES);
NDArray::registerSpecialUse({&dLdI, &dLdA}, {&input, &alpha, &dLdO});
manager.synchronize();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__device__ void softMaxForVectorCuda(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) {
// logic of this kernel is based on assumption gridDim = 1
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len;
__shared__ int numOfIters;
__shared__ T* shmem;
if (threadIdx.x == 0) {
extern __shared__ char shared[];
shmem = reinterpret_cast<T*>(shared);
len = shape::length(xShapeInfo);
numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x)
}
__syncthreads();
T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ??
// ************ evaluate max element in input array x ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong xOffset = shape::getIndexOffset(elemIdx, xShapeInfo);
shmem[threadIdx.x] = (threadIdx.x != 0) ? x[xOffset] : sd::math::nd4j_max<T>(x[xOffset], temp); // take into account max element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ??
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] = sd::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
__syncthreads();
}
temp = shmem[0]; // save max value calculated at current iteration
}
const T max = temp;
temp = 0;
// ************ evaluate value of exp(x[offset] - max) per each element, store it to shared memory shmem ************ //
// at the same evaluate sum of exponents, sum will be stored in shmem[0]
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong xOffset = shape::getIndexOffset(elemIdx, xShapeInfo);
const Nd4jLong zOffset = shape::getIndexOffset(elemIdx, zShapeInfo);
z[zOffset] = sd::math::nd4j_exp<T, T>(x[xOffset] - max);
shmem[threadIdx.x] = (threadIdx.x != 0) ? z[zOffset] : (z[zOffset] + temp); // take into account sum element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = 0;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] += shmem[threadIdx.x + s];
__syncthreads();
}
temp = shmem[0]; // save sum calculated at current iteration
}
// ************ evaluate z[offset] / sum ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx >= len) continue;
const Nd4jLong zOffset = shape::getIndexOffset(elemIdx, zShapeInfo);
z[zOffset] /= shmem[0];
}
}
template<typename T>
__global__ void softMaxForVectorCudaGlobal(const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) {
softMaxForVectorCuda<T>(vx, xShapeInfo, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
template <typename T>
linkage void softMaxForVectorCudaLauncher(const cudaStream_t* stream, const void *vx, const Nd4jLong *xShapeInfo, void *vz, const Nd4jLong *zShapeInfo) {
softMaxForVectorCudaGlobal<T><<<1, MAX_NUM_THREADS / 4 , (MAX_NUM_THREADS / 4) * sizeof(T) + 512, *stream>>>(vx, xShapeInfo, vz, zShapeInfo);
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ static void softMaxCuda(const void* vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xOffsets,
void* vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zOffsets) {
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
const auto* xTad = x + xOffsets[blockIdx.x];
auto* zTad = z + zOffsets[blockIdx.x];
softMaxForVectorCuda<T>(xTad, xTadShapeInfo, zTad, zTadShapeInfo);
}
///////////////////////////////////////////////////////////////////
template<typename T>
static void softMaxCudaLauncher(const int blocksPerGrid, const int threadsPerBlock, const int sharedMem, const cudaStream_t *stream,
const void* vx, const Nd4jLong *xTadShapeInfo, const Nd4jLong *xOffsets,
void* vz, const Nd4jLong *zTadShapeInfo, const Nd4jLong *zOffsets) {
softMaxCuda<T><<<blocksPerGrid, threadsPerBlock, sharedMem, *stream>>>(vx, xTadShapeInfo, xOffsets, vz, zTadShapeInfo, zOffsets);
}
//////////////////////////////////////////////////////////////////////////
void softmax(sd::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
const int rank = input.rankOf();
PointersManager manager(context, "helpers::softmax");
if(input.isVector()) {
if(rank == 1 || input.sizeAt(dimension) != 1) {
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), softMaxForVectorCudaLauncher, (context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer(), output.getSpecialShapeInfo()), FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
}
else
output = 1.;
}
else {
auto packX = sd::ConstantTadHelper::getInstance()->tadForDimensions(input.getShapeInfo(), {dimension});
auto packZ = sd::ConstantTadHelper::getInstance()->tadForDimensions(output.getShapeInfo(), {dimension});
const int threadsPerBlock = MAX_NUM_THREADS / 4;
const int blocksPerGrid = packZ.numberOfTads();
const int sharedMem = input.sizeOfT() * threadsPerBlock + 512;
NDArray::prepareSpecialUse({&output}, {&input});
BUILD_SINGLE_SELECTOR(input.dataType(), softMaxCudaLauncher, (blocksPerGrid, threadsPerBlock, sharedMem, context->getCudaStream(), input.getSpecialBuffer(), packX.specialShapeInfo(), packX.specialOffsets(), output.specialBuffer(), packZ.specialShapeInfo(), packZ.specialOffsets()), FLOAT_TYPES);
NDArray::registerSpecialUse({&output}, {&input});
// auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDimension(reduce::Max, {dimension}, true);
// (input - maxAlongDim).applyTransform(transform::Exp, &output); // output contains exponents temporarily
// auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true);
// output /= sumAlongDim;
// input.tickReadDevice();
}
manager.synchronize();
output.tickWriteDevice();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ void logSoftMaxForVectorCuda(const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
// logic of this kernel is based on assumption gridDim = 1
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len;
__shared__ int numOfIters;
__shared__ T* shmem;
if (threadIdx.x == 0) {
extern __shared__ char shared[];
shmem = reinterpret_cast<T*>(shared);
len = shape::length(xzShapeInfo);
numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x)
}
__syncthreads();
T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ??
// ************ evaluate max element in input array x ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
shmem[threadIdx.x] = (threadIdx.x != 0) ? x[offset] : sd::math::nd4j_max<T>(x[offset], temp); // take into account max element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ??
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] = sd::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
__syncthreads();
}
temp = shmem[0]; // save max value calculated at current iteration
}
const T max = temp;
temp = 0;
// ************ evaluate value of exp(x[offset] - max) per each element, store it to shared memory shmem ************ //
// at the same time evaluate sum of exponents, sum will be stored in shmem[0]
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
z[offset] = sd::math::nd4j_exp<T, T>(x[offset] - max);
shmem[threadIdx.x] = (threadIdx.x != 0) ? z[offset] : (z[offset] + temp); // take into account sum element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = 0;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] += shmem[threadIdx.x + s];
__syncthreads();
}
temp = shmem[0]; // save sum calculated at current iteration
}
// ************ evaluate log(z[offset] / sum) ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx >= len) continue;
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
z[offset] = sd::math::nd4j_log<T,T>(z[offset] / shmem[0]);
}
}
///////////////////////////////////////////////////////////////////
template <typename T>
linkage void logSoftMaxForVectorCudaLauncher(const cudaStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
logSoftMaxForVectorCuda<T><<<1, MAX_NUM_THREADS, MAX_NUM_THREADS * sizeof(T) + 512, *stream>>>(vx, xzShapeInfo, vz);
}
//////////////////////////////////////////////////////////////////////////
void logSoftmax(sd::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
const int rank = input.rankOf();
if(input.isVector()) {
if(rank == 1 || input.sizeAt(dimension) != 1) {
BUILD_SINGLE_SELECTOR(input.dataType(), logSoftMaxForVectorCudaLauncher, (context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer()), FLOAT_TYPES);
input.tickReadDevice();
}
else
output = 0.;
}
else {
auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDimension(reduce::Max, {dimension}, true);
(input - maxAlongDim).applyTransform(transform::Exp, output); // output contains exponents temporarily
auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true);
output /= sumAlongDim;
output.applyTransform(transform::Log, output);
input.tickReadDevice();
}
PointersManager manager(context, "helpers::logSoftmax");
manager.synchronize();
output.tickWriteDevice();
}
///////////////////////////////////////////////////////////////////
template<typename T>
__global__ linkage void softMaxDerivForVectorCuda(const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
// logic of this kernel is based on assumption gridDim = 1
const auto x = reinterpret_cast<const T*>(vx);
auto z = reinterpret_cast<T*>(vz);
__shared__ Nd4jLong len;
__shared__ int numOfIters;
__shared__ T* shmem;
if (threadIdx.x == 0) {
extern __shared__ char shared[];
shmem = reinterpret_cast<T*>(shared);
len = shape::length(xzShapeInfo);
numOfIters = (len + blockDim.x - 1) / blockDim.x; // ceil (len / blockDim.x)
}
__syncthreads();
T temp = -DataTypeUtils::max<T>(); // set start value to compare with at first iteration, FIXME: what if T is unsigned ??
// ************ evaluate max element in input array x ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
shmem[threadIdx.x] = (threadIdx.x != 0) ? x[offset] : sd::math::nd4j_max<T>(x[offset], temp); // take into account max element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = -DataTypeUtils::max<T>(); // FIXME: what if T is unsigned ??
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] = sd::math::nd4j_max<T>(shmem[threadIdx.x], shmem[threadIdx.x + s]);
__syncthreads();
}
temp = shmem[0]; // save max value calculated at current iteration
}
const T max = temp;
temp = 0;
// ************ evaluate value of exp(x[offset] - max) per each element, store it to shared memory shmem ************ //
// at the same evaluate sum of exponents, sum will be stored in shmem[0]
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx < len) {
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
z[offset] = sd::math::nd4j_exp<T, T>(x[offset] - max);
shmem[threadIdx.x] = (threadIdx.x != 0) ? z[offset] : (z[offset] + temp); // take into account sum element evaluated on previous iteration and stored in temp
}
else
shmem[threadIdx.x] = 0;
__syncthreads();
for (int s = blockDim.x / 2; s > 0; s /= 2) {
if(threadIdx.x < s)
shmem[threadIdx.x] += shmem[threadIdx.x + s];
__syncthreads();
}
temp = shmem[0]; // save sum calculated at current iteration
}
// ************ evaluate (z[offset] / sum) and derivative z[offset] = z[offset] * (1 - z[offset]) ************ //
for (int i = 0; i < numOfIters; ++i) {
const Nd4jLong elemIdx = i * blockDim.x + threadIdx.x;
if(elemIdx >= len) continue;
const Nd4jLong offset = shape::getIndexOffset(elemIdx, xzShapeInfo);
z[offset] /= shmem[0];
z[offset] *= (1.f - z[offset]); // derivative
}
}
///////////////////////////////////////////////////////////////////
template <typename T>
linkage void softMaxDerivForVectorCudaLauncher(const cudaStream_t* stream, const void *vx, const Nd4jLong *xzShapeInfo, void *vz) {
softMaxDerivForVectorCuda<T><<<1, MAX_NUM_THREADS, MAX_NUM_THREADS * sizeof(T) + 512, *stream>>>(vx, xzShapeInfo, vz);
}
///////////////////////////////////////////////////////////////////
void softmaxDerivative(sd::LaunchContext * context, const NDArray& input, NDArray& output, const int dimension) {
if(!input.isActualOnDeviceSide()) input.syncToDevice();
const int rank = input.rankOf();
int temp;
if(shape::isCommonVector(input.getShapeInfo(), temp)) {
BUILD_SINGLE_SELECTOR(input.dataType(), softMaxDerivForVectorCudaLauncher, (context->getCudaStream(), input.getSpecialBuffer(), input.getSpecialShapeInfo(), output.getSpecialBuffer()), FLOAT_TYPES);
input.tickReadDevice();
}
else {
auto maxAlongDim = const_cast<NDArray&>(input).reduceAlongDimension(reduce::Max, {dimension}, true);
(input - maxAlongDim).applyTransform(transform::Exp, output); // output contains exponents temporarily
auto sumAlongDim = output.reduceAlongDimension(reduce::Sum, {dimension}, true);
output /= sumAlongDim;
output *= (1.f - output); // derivative
input.tickReadDevice();
}
PointersManager manager(context, "helpers::softmaxDerivative");
manager.synchronize();
output.tickWriteDevice();
}
template <typename T>
linkage void thresholdRelu_(NDArray const& input, double threshold, NDArray& output) {
auto routine = LAMBDA_T(_x, threshold) {
return _x > (T)threshold ? _x: (T)0.f;
};
const_cast<NDArray&>(input).applyLambda(routine, output);
}
void thresholdRelu(sd::LaunchContext * context, NDArray const& input, double threshold, NDArray& output) {
BUILD_SINGLE_SELECTOR(input.dataType(), thresholdRelu_, (input, threshold, output), FLOAT_TYPES);
}
template <typename T>
linkage void thresholdReluDerivative_(NDArray* input, double theta, NDArray* dLdO, NDArray* output) {
auto derivative = LAMBDA_TT(_x, grO, theta) {if (_x > theta) return grO; else return static_cast<T>(0); };
input->applyPairwiseLambda(*dLdO, derivative, *output);
}
void thresholdReluDerivative(sd::LaunchContext * context, NDArray* input, double threshold, NDArray* dLdO, NDArray* output) {
BUILD_SINGLE_SELECTOR(input->dataType(), thresholdReluDerivative_, (input, threshold, dLdO, output), FLOAT_TYPES);
}
}
}
}
|
a3f83d6acddafb587be8c888a9207e49614e6807.hip | // !!! This is a file automatically generated by hipify!!!
#include <iostream>
#include <hip/hip_runtime.h>
#include <helper_cuda.h>
#include "hipcub/hipcub.hpp"
using namespace std;
using namespace cub;
bool testRadixSort_cub(int len, int buckets) {
int *h_key_in = new int[len];
srand(time(NULL));
for(int i = 0; i < len; i++) h_key_in[i] = rand() % buckets;
int *d_key_in, *d_key_out;
hipMalloc(&d_key_in, len*sizeof(int));
hipMalloc(&d_key_out, len*sizeof(int));
hipMemcpy(d_key_in, h_key_in, sizeof(int) * len, hipMemcpyHostToDevice);
hipDeviceSynchronize();
hipEvent_t start_sort, stop_sort;
hipEventCreate(&start_sort);
hipEventCreate(&stop_sort);
float temp_time = 0.0f, sort_time = 0.0f;
void *d_temp_storage_sort = NULL;
size_t temp_storage_bytes_sort = 0;
// first call with a NULL temporary buffer only reports the scratch size needed
CubDebugExit(hipcub::DeviceRadixSort::SortKeys(d_temp_storage_sort, temp_storage_bytes_sort, d_key_in, d_key_out, len));
hipMalloc(&d_temp_storage_sort, temp_storage_bytes_sort);
hipEventRecord(start_sort, 0);
hipcub::DeviceRadixSort::SortKeys(d_temp_storage_sort, temp_storage_bytes_sort, d_key_in, d_key_out, len);
hipEventRecord(stop_sort, 0);
hipEventSynchronize(stop_sort);
hipEventElapsedTime(&temp_time, start_sort, stop_sort);
sort_time += temp_time;
printf("hipCUB radix sort of %d keys finished in %.3f ms\n", len, sort_time);
hipFree(d_key_in); hipFree(d_key_out); hipFree(d_temp_storage_sort);
hipEventDestroy(start_sort); hipEventDestroy(stop_sort);
if (h_key_in) delete[] h_key_in;
return true;
}
bool testRadixSort_kv_cub(int len) {
}
if(mode == 0){
random_input_generator(h_key_in, n_elements, kNumBuckets, kLogNumBuckets, bucket_d, random_mode, delta_buckets, alpha_hockey);
hipMemcpy(d_key_in, h_key_in, sizeof(uint32_t) * n_elements, hipMemcpyHostToDevice);
hipDeviceSynchronize();
// key-only sort:
void *d_temp_storage_sort = NULL;
size_t temp_storage_bytes_sort = 0;
CubDebugExit(hipcub::DeviceRadixSort::SortKeys(d_temp_storage_sort, temp_storage_bytes_sort, d_key_in, d_key_out, n_elements, 0, int(ceil(log2(float(kNumBuckets))))));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage_sort, temp_storage_bytes_sort));
hipEventRecord(start_sort, 0);
hipcub::DeviceRadixSort::SortKeys(d_temp_storage_sort, temp_storage_bytes_sort, d_key_in, d_key_out, n_elements, 0, int(ceil(log2(float(kNumBuckets)))));
hipEventRecord(stop_sort, 0);
hipEventSynchronize(stop_sort);
hipEventElapsedTime(&temp_time, start_sort, stop_sort);
sort_time += temp_time;
printf("CUB's radix sort finished in %.3f ms, %.3f Mkey/s\n", sort_time, float(n_elements)/sort_time/1000.0f);
if(validate)
{
h_cpu_results_key = new uint32_t[n_elements];
cpu_multisplit_general(h_key_in, h_cpu_results_key, n_elements, bucket_identifier, 0, kNumBuckets);
hipMemcpy(h_gpu_results_key, d_key_out, sizeof(uint32_t) * n_elements, hipMemcpyDeviceToHost);
bool correct = true;
for(int i = 0; i<n_elements && correct;i++)
{
if(h_cpu_results_key[i] != h_gpu_results_key[i]){
printf("### Wrong results at index %d: cpu = %d, gpu = %d\n", i, h_cpu_results_key[i], h_gpu_results_key[i]);
correct = false;
}
}
printf("Validation was done successfully!\n");
}
if(d_temp_storage_sort)CubDebugExit(g_allocator.DeviceFree(d_temp_storage_sort));
}
else if(mode == 10)
{
random_input_generator(h_key_in, n_elements, kNumBuckets, kLogNumBuckets, bucket_d, random_mode, delta_buckets, alpha_hockey);
for(int k = 0; k<n_elements;k++)
h_value_in[k] = h_key_in[k];
hipMemcpy(d_key_in, h_key_in, sizeof(uint32_t) * n_elements, hipMemcpyHostToDevice);
hipMemcpy(d_value_in, h_value_in, sizeof(uint32_t) * n_elements, hipMemcpyHostToDevice);
hipDeviceSynchronize();
// key-value sort:
void *d_temp_storage_sort_pairs = NULL;
size_t temp_storage_bytes_sort_pairs = 0;
CubDebugExit(hipcub::DeviceRadixSort::SortPairs(d_temp_storage_sort_pairs, temp_storage_bytes_sort_pairs, d_key_in, d_key_out, d_value_in, d_value_out, n_elements, 0, int(ceil(log2(float(kNumBuckets))))));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage_sort_pairs, temp_storage_bytes_sort_pairs));
hipEventRecord(start_sort, 0);
hipcub::DeviceRadixSort::SortPairs(d_temp_storage_sort_pairs, temp_storage_bytes_sort_pairs, d_key_in, d_key_out, d_value_in, d_value_out, n_elements, 0, int(ceil(log2(float(kNumBuckets)))));
hipEventRecord(stop_sort, 0);
hipEventSynchronize(stop_sort);
hipEventElapsedTime(&temp_time, start_sort, stop_sort);
sort_time += temp_time;
printf("CUB's key-value radix sort finished in %.3f ms, %.3f Mkey/s\n", sort_time, float(n_elements)/sort_time/1000.0f);
if(validate)
{
h_cpu_results_key = new uint32_t[n_elements];
h_cpu_results_value = new uint32_t[n_elements];
cpu_multisplit_pairs_general(h_key_in, h_cpu_results_key, h_value_in, h_cpu_results_value, n_elements, bucket_identifier, 0, kNumBuckets);
hipMemcpy(h_gpu_results_key, d_key_out, sizeof(uint32_t) * n_elements, hipMemcpyDeviceToHost);
hipMemcpy(h_gpu_results_value, d_value_out, sizeof(uint32_t) * n_elements, hipMemcpyDeviceToHost);
bool correct = true;
for(int i = 0; i<n_elements && correct;i++)
{
if((h_cpu_results_key[i] != h_gpu_results_key[i]) || (h_cpu_results_value[i] != h_gpu_results_value[i])){
printf("### Wrong results at index %d: cpu = (%d,%d), gpu = (%d,%d)\n", i, h_cpu_results_key[i], h_cpu_results_value[i], h_gpu_results_key[i], h_gpu_results_value[i]);
correct = false;
}
}
printf("Validation was done successfully!\n");
}
if(d_temp_storage_sort_pairs)CubDebugExit(g_allocator.DeviceFree(d_temp_storage_sort_pairs));
} | a3f83d6acddafb587be8c888a9207e49614e6807.cu | #include <iostream>
#include <cuda_runtime.h>
#include <helper_cuda.h>
#include "cub/device/device_scan.cuh"
using namespace std;
using namespace cub;
bool testRadixSort_cub(int len, int buckets) {
int *h_key_in = new int[len];
srand(time(NULL));
for(int i = 0; i < len; i++) h_key_in[i] = rand() % buckets;
int *d_key_in, *d_key_out;
cudaMalloc(&d_key_in, len*sizeof(int));
cudaMalloc(&d_key_out, len*sizeof(int));
cudaMemcpy(d_key_in, h_key_in, sizeof(int) * len, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
cudaEvent_t start_sort, stop_sort;
cudaEventCreate(&start_sort);
cudaEventCreate(&stop_sort);
float temp_time = 0.0f, sort_time = 0.0f;
void *d_temp_storage_sort = NULL;
size_t temp_storage_bytes_sort = 0;
// first call with a NULL temporary buffer only reports the scratch size needed
CubDebugExit(cub::DeviceRadixSort::SortKeys(d_temp_storage_sort, temp_storage_bytes_sort, d_key_in, d_key_out, len));
cudaMalloc(&d_temp_storage_sort, temp_storage_bytes_sort);
cudaEventRecord(start_sort, 0);
cub::DeviceRadixSort::SortKeys(d_temp_storage_sort, temp_storage_bytes_sort, d_key_in, d_key_out, len);
cudaEventRecord(stop_sort, 0);
cudaEventSynchronize(stop_sort);
cudaEventElapsedTime(&temp_time, start_sort, stop_sort);
sort_time += temp_time;
printf("CUB radix sort of %d keys finished in %.3f ms\n", len, sort_time);
cudaFree(d_key_in); cudaFree(d_key_out); cudaFree(d_temp_storage_sort);
cudaEventDestroy(start_sort); cudaEventDestroy(stop_sort);
if (h_key_in) delete[] h_key_in;
return true;
}
bool testRadixSort_kv_cub(int len) {
}
if(mode == 0){
random_input_generator(h_key_in, n_elements, kNumBuckets, kLogNumBuckets, bucket_d, random_mode, delta_buckets, alpha_hockey);
cudaMemcpy(d_key_in, h_key_in, sizeof(uint32_t) * n_elements, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
// key-only sort:
void *d_temp_storage_sort = NULL;
size_t temp_storage_bytes_sort = 0;
CubDebugExit(cub::DeviceRadixSort::SortKeys(d_temp_storage_sort, temp_storage_bytes_sort, d_key_in, d_key_out, n_elements, 0, int(ceil(log2(float(kNumBuckets))))));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage_sort, temp_storage_bytes_sort));
cudaEventRecord(start_sort, 0);
cub::DeviceRadixSort::SortKeys(d_temp_storage_sort, temp_storage_bytes_sort, d_key_in, d_key_out, n_elements, 0, int(ceil(log2(float(kNumBuckets)))));
cudaEventRecord(stop_sort, 0);
cudaEventSynchronize(stop_sort);
cudaEventElapsedTime(&temp_time, start_sort, stop_sort);
sort_time += temp_time;
printf("CUB's radix sort finished in %.3f ms, %.3f Mkey/s\n", sort_time, float(n_elements)/sort_time/1000.0f);
if(validate)
{
h_cpu_results_key = new uint32_t[n_elements];
cpu_multisplit_general(h_key_in, h_cpu_results_key, n_elements, bucket_identifier, 0, kNumBuckets);
cudaMemcpy(h_gpu_results_key, d_key_out, sizeof(uint32_t) * n_elements, cudaMemcpyDeviceToHost);
bool correct = true;
for(int i = 0; i<n_elements && correct;i++)
{
if(h_cpu_results_key[i] != h_gpu_results_key[i]){
printf("### Wrong results at index %d: cpu = %d, gpu = %d\n", i, h_cpu_results_key[i], h_gpu_results_key[i]);
correct = false;
}
}
printf("Validation was done successfully!\n");
}
if(d_temp_storage_sort)CubDebugExit(g_allocator.DeviceFree(d_temp_storage_sort));
}
else if(mode == 10)
{
random_input_generator(h_key_in, n_elements, kNumBuckets, kLogNumBuckets, bucket_d, random_mode, delta_buckets, alpha_hockey);
for(int k = 0; k<n_elements;k++)
h_value_in[k] = h_key_in[k];
cudaMemcpy(d_key_in, h_key_in, sizeof(uint32_t) * n_elements, cudaMemcpyHostToDevice);
cudaMemcpy(d_value_in, h_value_in, sizeof(uint32_t) * n_elements, cudaMemcpyHostToDevice);
cudaDeviceSynchronize();
// key-value sort:
void *d_temp_storage_sort_pairs = NULL;
size_t temp_storage_bytes_sort_pairs = 0;
CubDebugExit(cub::DeviceRadixSort::SortPairs(d_temp_storage_sort_pairs, temp_storage_bytes_sort_pairs, d_key_in, d_key_out, d_value_in, d_value_out, n_elements, 0, int(ceil(log2(float(kNumBuckets))))));
CubDebugExit(g_allocator.DeviceAllocate(&d_temp_storage_sort_pairs, temp_storage_bytes_sort_pairs));
cudaEventRecord(start_sort, 0);
cub::DeviceRadixSort::SortPairs(d_temp_storage_sort_pairs, temp_storage_bytes_sort_pairs, d_key_in, d_key_out, d_value_in, d_value_out, n_elements, 0, int(ceil(log2(float(kNumBuckets)))));
cudaEventRecord(stop_sort, 0);
cudaEventSynchronize(stop_sort);
cudaEventElapsedTime(&temp_time, start_sort, stop_sort);
sort_time += temp_time;
printf("CUB's key-value radix sort finished in %.3f ms, %.3f Mkey/s\n", sort_time, float(n_elements)/sort_time/1000.0f);
if(validate)
{
h_cpu_results_key = new uint32_t[n_elements];
h_cpu_results_value = new uint32_t[n_elements];
cpu_multisplit_pairs_general(h_key_in, h_cpu_results_key, h_value_in, h_cpu_results_value, n_elements, bucket_identifier, 0, kNumBuckets);
cudaMemcpy(h_gpu_results_key, d_key_out, sizeof(uint32_t) * n_elements, cudaMemcpyDeviceToHost);
cudaMemcpy(h_gpu_results_value, d_value_out, sizeof(uint32_t) * n_elements, cudaMemcpyDeviceToHost);
bool correct = true;
for(int i = 0; i<n_elements && correct;i++)
{
if((h_cpu_results_key[i] != h_gpu_results_key[i]) || (h_cpu_results_value[i] != h_gpu_results_value[i])){
printf("### Wrong results at index %d: cpu = (%d,%d), gpu = (%d,%d)\n", i, h_cpu_results_key[i], h_cpu_results_value[i], h_gpu_results_key[i], h_gpu_results_value[i]);
correct = false;
}
}
printf("Validation was done successfully!\n");
}
if(d_temp_storage_sort_pairs)CubDebugExit(g_allocator.DeviceFree(d_temp_storage_sort_pairs));
} |
89320a7193a4e00e9915a6c0fc9074d7db4a3874.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
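/* The worked example above can be reproduced with a plain CPU routine before any
   kernels are written. The sketch below is only an illustration of the four steps,
   not part of the assignment's reference code; the helper name
   cpu_histogram_and_cdf_sketch and its signature are made up for this file. */
static void cpu_histogram_and_cdf_sketch(const float* logLum, unsigned int* cdf,
                                         int numPixels, int numBins)
{
  // 1) minimum and maximum of the luminance values
  float lumMin = logLum[0], lumMax = logLum[0];
  for (int i = 1; i < numPixels; ++i) {
    if (logLum[i] < lumMin) lumMin = logLum[i];
    if (logLum[i] > lumMax) lumMax = logLum[i];
  }
  // 2) range
  const float lumRange = lumMax - lumMin;
  // 3) histogram: bin = (lum[i] - lumMin) / lumRange * numBins
  unsigned int* histo = new unsigned int[numBins]();
  for (int i = 0; i < numPixels; ++i) {
    int bin = (int)((logLum[i] - lumMin) / lumRange * numBins);
    if (bin > numBins - 1) bin = numBins - 1; // the maximum value would otherwise land one past the last bin
    ++histo[bin];
  }
  // 4) exclusive scan of the histogram gives the cumulative distribution
  unsigned int running = 0;
  for (int b = 0; b < numBins; ++b) {
    cdf[b] = running; // exclusive: cdf[0] is always 0
    running += histo[b];
  }
  delete[] histo;
}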
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
/*__global__ void minMax(float* sortingArray,
float* min_logLum,
float* max_logLum,
int rows, int cols)
{
float min = sortingArray[0];
float max = sortingArray[0];
int theArraySizehalf = rows*cols/2;
int2 index2d= make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int index1d = index2d.x + index2d.y * cols;
int counter=1;
for(int i=1; i < theArraySizehalf; i=i*2){
if(index%(2^counter)==0 && index < (rows*cols)){
if(sortingArray[index1d]>sortingArray[index1d+i]){
return;
}*/
__global__ void global_min_kernel(float* sortingArray)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
if(sortingArray[myId] > sortingArray[myId + s])
{
sortingArray[myId] = sortingArray[myId + s];
}
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
sortingArray[blockIdx.x] = sortingArray[myId];
}
}
__global__ void global_max_kernel(float* sortingArray)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
if(sortingArray[myId] < sortingArray[myId + s])
{
sortingArray[myId] = sortingArray[myId + s];
}
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
sortingArray[blockIdx.x] = sortingArray[myId];
}
}
__global__ void simple_histo(int *d_bins, const float *d_in, int* runningArray, const int numBins, float lumRange, float lumMin, int totalSize)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
if (myId >= totalSize) return; // guard threads launched past the end of the image
float myItem = d_in[myId]; // the luminance values are floats, not ints
int myBin = min(numBins - 1, int((myItem - lumMin) / lumRange * numBins));
runningArray[myId] = myBin;
/*
if(myId==0){
for(int i=0; i<totalSize; i++){
d_bins[runningArray[i]] ++;
}
}*/
atomicAdd(&(d_bins[myBin]), 1);
}
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
printf("woot1");
float* sortingArray;
checkCudaErrors(hipMalloc(&sortingArray, numRows*numCols*sizeof(float)));
checkCudaErrors(hipMemcpy(sortingArray, d_logLuminance, numRows*numCols*sizeof(float), hipMemcpyDeviceToDevice));
const dim3 blockSize(32, 32);
const dim3 gridSize(numCols*numRows/1024);
hipLaunchKernelGGL(( global_min_kernel), dim3(gridSize), dim3(blockSize), 0, 0, sortingArray);
min_logLum = sortingArray[0];
hipLaunchKernelGGL(( global_max_kernel), dim3(gridSize), dim3(blockSize), 0, 0, sortingArray);
max_logLum = sortingArray[0];
float range = max_logLum - min_logLum;
printf("woot2");
int* runningArray;
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
checkCudaErrors(hipMalloc(&runningArray, numRows*numCols*sizeof(int)));
int totalSize=numCols*numRows;
// device buffers have to be cleared with a memset (or a kernel); a host-side loop cannot write device memory
checkCudaErrors(hipMemset(runningArray, 0, totalSize*sizeof(int)));
int* d_bins;
checkCudaErrors(hipMalloc(&d_bins, numBins*sizeof(int)));
checkCudaErrors(hipMemset(d_bins, 0, numBins*sizeof(int)));
printf("woot3");
hipLaunchKernelGGL(( simple_histo), dim3(gridSize), dim3(blockSize), 0, 0, d_bins, d_logLuminance, runningArray, numBins, range, min_logLum, totalSize);
hipDeviceSynchronize(); checkCudaErrors(hipGetLastError());
d_cdf[0]=0;
for(int i=1; i<numBins; i++){
d_cdf[i]=d_cdf[i-1]+d_bins[i-1];
}
printf("woot4");
} | 89320a7193a4e00e9915a6c0fc9074d7db4a3874.cu | /* Udacity Homework 3
HDR Tone-mapping
Background HDR
==============
A High Dynamic Range (HDR) image contains a wider variation of intensity
and color than is allowed by the RGB format with 1 byte per channel that we
have used in the previous assignment.
To store this extra information we use single precision floating point for
each channel. This allows for an extremely wide range of intensity values.
In the image for this assignment, the inside of church with light coming in
through stained glass windows, the raw input floating point values for the
channels range from 0 to 275. But the mean is .41 and 98% of the values are
less than 3! This means that certain areas (the windows) are extremely bright
compared to everywhere else. If we linearly map this [0-275] range into the
[0-255] range that we have been using then most values will be mapped to zero!
The only thing we will be able to see are the very brightest areas - the
windows - everything else will appear pitch black.
The problem is that although we have cameras capable of recording the wide
range of intensity that exists in the real world our monitors are not capable
of displaying them. Our eyes are also quite capable of observing a much wider
range of intensities than our image formats / monitors are capable of
displaying.
Tone-mapping is a process that transforms the intensities in the image so that
the brightest values aren't nearly so far away from the mean. That way when
we transform the values into [0-255] we can actually see the entire image.
There are many ways to perform this process and it is as much an art as a
science - there is no single "right" answer. In this homework we will
implement one possible technique.
Background Chrominance-Luminance
================================
The RGB space that we have been using to represent images can be thought of as
one possible set of axes spanning a three dimensional space of color. We
sometimes choose other axes to represent this space because they make certain
operations more convenient.
Another possible way of representing a color image is to separate the color
information (chromaticity) from the brightness information. There are
multiple different methods for doing this - a common one during the analog
television days was known as Chrominance-Luminance or YUV.
We choose to represent the image in this way so that we can remap only the
intensity channel and then recombine the new intensity values with the color
information to form the final image.
Old TV signals used to be transmitted in this way so that black & white
televisions could display the luminance channel while color televisions would
display all three of the channels.
Tone-mapping
============
In this assignment we are going to transform the luminance channel (actually
the log of the luminance, but this is unimportant for the parts of the
algorithm that you will be implementing) by compressing its range to [0, 1].
To do this we need the cumulative distribution of the luminance values.
Example
-------
input : [2 4 3 3 1 7 4 5 7 0 9 4 3 2]
min / max / range: 0 / 9 / 9
histo with 3 bins: [4 7 3]
cdf : [4 11 14]
Your task is to calculate this cumulative distribution by following these
steps.
*/
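/* Step 4 (the exclusive scan) only has to cover numBins values, so a single
   thread block is enough. The kernel below is one common way to do it and is a
   sketch rather than the assignment's required answer; the name
   exclusive_scan_sketch is made up, and it assumes numBins <= blockDim.x with
   blockDim.x a power of two, launched as
   exclusive_scan_sketch<<<1, threads, threads * sizeof(unsigned int)>>>(histo, cdf, numBins). */
__global__ void exclusive_scan_sketch(const unsigned int* histo, unsigned int* cdf, int numBins)
{
  extern __shared__ unsigned int temp[]; // one slot per thread
  const int tid = threadIdx.x;
  temp[tid] = (tid < numBins) ? histo[tid] : 0;
  __syncthreads();
  // Hillis-Steele inclusive scan in shared memory
  for (int offset = 1; offset < (int)blockDim.x; offset <<= 1) {
    unsigned int addend = (tid >= offset) ? temp[tid - offset] : 0;
    __syncthreads();
    temp[tid] += addend;
    __syncthreads();
  }
  // shift right by one element to turn the inclusive result into an exclusive scan
  if (tid < numBins)
    cdf[tid] = (tid == 0) ? 0 : temp[tid - 1];
}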
#include "reference_calc.cpp"
#include "utils.h"
#include <stdio.h>
/*__global__ void minMax(float* sortingArray,
float* min_logLum,
float* max_logLum,
int rows, int cols)
{
float min = sortingArray[0];
float max = sortingArray[0];
int theArraySizehalf = rows*cols/2;
int2 index2d= make_int2( ( blockIdx.x * blockDim.x ) + threadIdx.x, ( blockIdx.y * blockDim.y ) + threadIdx.y );
int index1d = index2d.x + index2d.y * cols;
int counter=1;
for(int i=1; i < theArraySizehalf; i=i*2){
if(index%(2^counter)==0 && index < (rows*cols)){
if(sortingArray[index1d]>sortingArray[index1d+i]){
return;
}*/
__global__ void global_min_kernel(float* sortingArray)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
if(sortingArray[myId] > sortingArray[myId + s])
{
sortingArray[myId] = sortingArray[myId + s];
}
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
sortingArray[blockIdx.x] = sortingArray[myId];
}
}
__global__ void global_max_kernel(float* sortingArray)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
int tid = threadIdx.x;
for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1)
{
if (tid < s)
{
if(sortingArray[myId] < sortingArray[myId + s])
{
sortingArray[myId] = sortingArray[myId + s];
}
}
__syncthreads(); // make sure all adds at one stage are done!
}
// only thread 0 writes result for this block back to global mem
if (tid == 0)
{
sortingArray[blockIdx.x] = sortingArray[myId];
}
}
__global__ void simple_histo(int *d_bins, const float *d_in, int* runningArray, const int numBins, float lumRange, float lumMin, int totalSize)
{
int myId = threadIdx.x + blockDim.x * blockIdx.x;
if (myId >= totalSize) return; // guard threads launched past the end of the image
float myItem = d_in[myId]; // the luminance values are floats, not ints
int myBin = min(numBins - 1, int((myItem - lumMin) / lumRange * numBins));
runningArray[myId] = myBin;
/*
if(myId==0){
for(int i=0; i<totalSize; i++){
d_bins[runningArray[i]] ++;
}
}*/
atomicAdd(&(d_bins[myBin]), 1);
}
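/* simple_histo above makes every thread contend on the same numBins global
   counters. A common refinement, sketched here for illustration only (it is not
   required by the assignment), accumulates a per-block histogram in shared memory
   and merges it once per block; the name privatized_histo_sketch is made up, and
   it assumes numBins * sizeof(int) bytes of dynamic shared memory are passed at
   launch. */
__global__ void privatized_histo_sketch(int* d_bins, const float* d_in, int numBins,
                                        float lumRange, float lumMin, int totalSize)
{
  extern __shared__ int localBins[]; // numBins counters private to this block
  for (int b = threadIdx.x; b < numBins; b += blockDim.x)
    localBins[b] = 0;
  __syncthreads();
  const int myId = threadIdx.x + blockDim.x * blockIdx.x;
  if (myId < totalSize) {
    const int myBin = min(numBins - 1, (int)((d_in[myId] - lumMin) / lumRange * numBins));
    atomicAdd(&localBins[myBin], 1); // shared-memory atomic: much cheaper than a global one
  }
  __syncthreads();
  for (int b = threadIdx.x; b < numBins; b += blockDim.x)
    atomicAdd(&d_bins[b], localBins[b]); // one global atomic per bin per block
}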
void your_histogram_and_prefixsum(const float* const d_logLuminance,
unsigned int* const d_cdf,
float &min_logLum,
float &max_logLum,
const size_t numRows,
const size_t numCols,
const size_t numBins)
{
//TODO
/*Here are the steps you need to implement
1) find the minimum and maximum value in the input logLuminance channel
store in min_logLum and max_logLum
2) subtract them to find the range
3) generate a histogram of all the values in the logLuminance channel using
the formula: bin = (lum[i] - lumMin) / lumRange * numBins
4) Perform an exclusive scan (prefix sum) on the histogram to get
the cumulative distribution of luminance values (this should go in the
incoming d_cdf pointer which already has been allocated for you) */
printf("woot1");
float* sortingArray;
checkCudaErrors(cudaMalloc(&sortingArray, numRows*numCols*sizeof(float)));
checkCudaErrors(cudaMemcpy(sortingArray, d_logLuminance, numRows*numCols*sizeof(float), cudaMemcpyDeviceToDevice));
const dim3 blockSize(32, 32);
const dim3 gridSize(numCols*numRows/1024);
global_min_kernel<<<gridSize, blockSize>>>(sortingArray);
min_logLum = sortingArray[0];
global_max_kernel<<<gridSize, blockSize>>>(sortingArray);
max_logLum = sortingArray[0];
float range = max_logLum - min_logLum;
printf("woot2");
int* runningArray;
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
checkCudaErrors(cudaMalloc(&runningArray, numRows*numCols*sizeof(int)));
int totalSize=numCols*numRows;
// device buffers have to be cleared with a memset (or a kernel); a host-side loop cannot write device memory
checkCudaErrors(cudaMemset(runningArray, 0, totalSize*sizeof(int)));
int* d_bins;
checkCudaErrors(cudaMalloc(&d_bins, numBins*sizeof(int)));
checkCudaErrors(cudaMemset(d_bins, 0, numBins*sizeof(int)));
printf("woot3");
simple_histo<<<gridSize, blockSize>>>(d_bins, d_logLuminance, runningArray, numBins, range, min_logLum, totalSize);
cudaDeviceSynchronize(); checkCudaErrors(cudaGetLastError());
d_cdf[0]=0;
for(int i=1; i<numBins; i++){
d_cdf[i]=d_cdf[i-1]+d_bins[i-1];
}
printf("woot4");
} |
b4fea29afe92f968d5216a36b4dac2246fba3326.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*Matrix-Vector*/
#include <iostream>
using namespace std;
__global__
void matrixVectorKernel(float* A, float* B, float* C, int n)
{
int i = threadIdx.x + (blockDim.x * blockIdx.x);
if(i<n)
{
C[i] = 0;
for(int j=0;j<n;j++)
C[i] += A[i*n+j] * B[j];
}
}
void matrixVector(float* A, float* B, float* C, int tam)
{
int sizeA = (tam*tam) * sizeof(float);
int size = tam * sizeof(float);
float *d_A,*d_B,*d_C;
hipMalloc((void**)&d_A,sizeA);
hipMalloc((void**)&d_B,size);
hipMalloc((void**)&d_C,size);
hipMemcpy(d_A,A,sizeA,hipMemcpyHostToDevice);
hipMemcpy(d_B,B,size,hipMemcpyHostToDevice);
hipLaunchKernelGGL(( matrixVectorKernel), dim3(ceil(tam/256.0)),dim3(256), 0, 0, d_A,d_B,d_C,tam);
hipMemcpy(C,d_C,size,hipMemcpyDeviceToHost);
hipFree(d_A);hipFree(d_B);hipFree(d_C);
}
int main()
{
int n = 3;
float *h_A,*h_B,*h_C;
h_A = new float[n*n];
	h_B = new float[n];   // B is an n-element vector
	h_C = new float[n];   // C holds the n-element result vector
for(int i = 0; i < n; i++)
{
for(int j = 0; j < n; j++)
h_A[i*n+j] = 2;
}
for(int i = 0; i < n; i++)
{
h_B[i] = 3;
}
matrixVector(h_A,h_B,h_C,n);
for(int i = 0; i < n; i++){
cout<<h_C[i]<<" ; ";
}
cout<<endl;
return 0;
} | b4fea29afe92f968d5216a36b4dac2246fba3326.cu | /*Matrix-Vector*/
#include <iostream>
using namespace std;
__global__
void matrixVectorKernel(float* A, float* B, float* C, int n)
{
int i = threadIdx.x + (blockDim.x * blockIdx.x);
if(i<n)
{
C[i] = 0;
for(int j=0;j<n;j++)
C[i] += A[i*n+j] * B[j];
}
}
void matrixVector(float* A, float* B, float* C, int tam)
{
int sizeA = (tam*tam) * sizeof(float);
int size = tam * sizeof(float);
float *d_A,*d_B,*d_C;
cudaMalloc((void**)&d_A,sizeA);
cudaMalloc((void**)&d_B,size);
cudaMalloc((void**)&d_C,size);
cudaMemcpy(d_A,A,sizeA,cudaMemcpyHostToDevice);
cudaMemcpy(d_B,B,size,cudaMemcpyHostToDevice);
matrixVectorKernel<<<ceil(tam/256.0),256>>>(d_A,d_B,d_C,tam);
cudaMemcpy(C,d_C,size,cudaMemcpyDeviceToHost);
cudaFree(d_A);cudaFree(d_B);cudaFree(d_C);
}
int main()
{
int n = 3;
float *h_A,*h_B,*h_C;
h_A = new float[n*n];
	h_B = new float[n];   // B is an n-element vector
	h_C = new float[n];   // C holds the n-element result vector
for(int i = 0; i < n; i++)
{
for(int j = 0; j < n; j++)
h_A[i*n+j] = 2;
}
for(int i = 0; i < n; i++)
{
h_B[i] = 3;
}
matrixVector(h_A,h_B,h_C,n);
for(int i = 0; i < n; i++){
cout<<h_C[i]<<" ; ";
}
cout<<endl;
return 0;
} |
f83a53f432fd45d4ecda5e59a910332d0526f0ae.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
// Copyright 2021 Roberto Lopez Castro
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "../FX_m2.cu"
#ifdef OPTLDS64
#include "store_and_transform_output_optLDS64_hip.cuh"
#include "../outer_product.cuh"
#elif OPTSTS64_CMP
#include "store_and_transform_output_optSTS64_compact.cuh"
#include "../outer_product_suffle.cuh"
#else
#include "store_and_transform_output_optSTS64.cuh"
#include "../outer_product_suffle.cuh"
#endif
#ifdef _noWALL_
typedef struct rusage resnfo;
typedef struct _timenfo {
double time;
double systime;
} timenfo;
#define timestamp(sample) getrusage(RUSAGE_SELF, (sample))
#define printtime(t) printf("%15f s (%f user + %f sys) ", \
t.time + t.systime, t.time, t.systime);
#else
typedef struct timeval resnfo;
typedef double timenfo;
#define timestamp(sample) gettimeofday((sample), 0)
#define printtime(t) printf("%15f s ", t);
#endif
#ifndef _WINOGRAD_
#define _WINOGRAD_
extern "C"
{
#define d(input, i, j) ( input[(i<<2) + (j)] )
__device__ __forceinline__ void load_and_transform_input_tile(float *Btd, float *pOutputs, int in_h, int in_w,
int tiles_dim, int in_c, int in_n, int tile_size,
int tiles_2d_dim, int tile_2d_s){
float workspace[3];
#pragma unroll
for(int j=0; j<4; j++){
workspace[0] = Btd[j];
workspace[1] = Btd[j+4];
workspace[2] = Btd[j+8];
Btd[j] = workspace[0] - workspace[2];
Btd[j+4] = workspace[1] + workspace[2];
Btd[j+8] = workspace[2] - workspace[1];
Btd[j+12] = workspace[1] - Btd[j+12];
}
int c_offset = BN*BC;
int c_tensor = threadIdx.y*BN + threadIdx.x;
#pragma unroll
for(int i=0; i<4; i++){ // prefetch 1 input tile/thread
pOutputs[c_tensor+i*c_offset*4] = d(Btd, i, 0) - d(Btd, i, 2);
pOutputs[c_tensor+i*c_offset*4+c_offset] = d(Btd, i, 1) + d(Btd, i, 2);
pOutputs[c_tensor+i*c_offset*4+2*c_offset] = d(Btd, i, 2) - d(Btd, i, 1);
pOutputs[c_tensor+i*c_offset*4+3*c_offset] = d(Btd, i, 1) - d(Btd, i, 3);
}
}
__device__ __forceinline__ void load_filter_tile(float *tiles, float *pOutputs,
int filt_c, int filt_k){
int c_tensor_s = threadIdx.y*BK + threadIdx.x;
int c_offset_s = BK*BC;
for(int k=0; k<2; k++){ // prefetch 2 filter tiles/thread
for(int i=0; i<4; i++){
#pragma unroll
for(int j=0; j<4; j++){
pOutputs[c_tensor_s + i*c_offset_s*4 + j*c_offset_s] = tiles[k*16 + i*4 + j];
}
}
c_tensor_s += BN;
}
}
__device__ __forceinline__ void prefetch_filter_tile(float *pInputs, float *tiles, int filt_k){
int c_tensor = blockIdx.z*BK + (threadIdx.y*filt_k<<4) + threadIdx.x; // Iny*filt_k*4*4
int acumm;
#pragma unroll
for(int i=0; i<4; i++){
acumm = (i*filt_k<<2);
#pragma unroll
for(int j=0; j<4; j++){
tiles[(i<<2) + j] = pInputs[acumm + j*filt_k + c_tensor];
tiles[16 + (i<<2) + j] = pInputs[acumm + j*filt_k + c_tensor+BN];
}
}
}
__device__ __forceinline__ void prefetch_input_tile(float *pInputs, float *tile, int in_h, int in_w, int in_n, int tiles_dim, short mask){
int c_tensor = (blockIdx.y%tiles_dim)*in_n*2 + (blockIdx.y/tiles_dim)*in_n*in_w*2 + blockIdx.x*BN + threadIdx.y*(in_n*in_h*in_w) + (threadIdx.x/in_n)*2*in_n + (threadIdx.x%in_n) - (in_n*in_w+in_n);
int acumm,x;
//short x1,x2;
if(mask==0xFFFF){
#pragma unroll
for(int i=0; i<4; i++){
acumm = i*in_n*in_w;
#pragma unroll
for(int j=0; j<4; j++){
tile[(i<<2) + j] = pInputs[acumm + j*in_n + c_tensor];
}
}
} else {
for(int i=0; i<4; i++){
acumm = i*in_n*in_w;
#pragma unroll
for(int j=0; j<4; j++){
x = (i<<2) + j;
tile[x] = 0;
if(mask&(1<<x))
tile[x]=pInputs[acumm + j*in_n + c_tensor];
}
}
}
}
__device__ __forceinline__ void prefetch_filter_frag(float4 *filter_frag, float4 *B_frag, int f_frag_offset, int offset1, int offset2){
*((float4*) (filter_frag)) = *(B_frag + offset1);
*((float4*) (filter_frag + 1)) = *(B_frag + offset2);
*((float4*) (filter_frag + 2)) = *(B_frag + f_frag_offset + offset1);
*((float4*) (filter_frag + 3)) = *(B_frag + f_frag_offset + offset2);
}
__device__ __forceinline__ void prefetch_input_frag(float4* input_frag, float4 *A_frag, int frag_offset, int offset1, int offset2){
*((float4*) (input_frag)) = *(A_frag + offset1); //ld_shared(A_frag + offset1);
*((float4*) (input_frag + 1)) = *(A_frag + offset2);
*((float4*) (input_frag + 2)) = *(A_frag + frag_offset + offset1);
*((float4*) (input_frag + 3)) = *(A_frag + frag_offset + offset2); //3=2+1
}
__global__ void Winograd_kernel(float *A, float *B, float *C,
int tiles_dim, int in_c, int in_n, int in_h, int in_w,
int tile_size, int filt_k, int filt_c,
int tiles_2d_dim, int out_c, int out_n,
int tile_2d_s, int out_h, int out_w){
extern __shared__ float shared_mem[];
float *input_smem = (float*)shared_mem;
float *filter_smem = (float*)&shared_mem[16*BC*BN];
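  // Validity mask for the 4x4 input tile prefetched below: one bit per tile element
  // (bit (i<<2)+j for row i, column j). Bits are cleared for elements that would fall
  // outside the padded input at the image borders, so prefetch_input_tile zero-fills them.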
short m = 0xFFFF;
if((blockIdx.y/tiles_dim)==0) m&=0xFFF0;
if((blockIdx.y/tiles_dim)==(tiles_dim-1)) m &= (!(in_w%2))?(0x0FFF):(0x00FF);
if(!((blockIdx.y+1)%tiles_dim)) m &= (!(in_w%2))?(0x7777):(0x3333);
if(!((blockIdx.y)%tiles_dim)) m&=0xeeee;
float img_tile[16]; // Prefetch input from GMEM
float filter_tile[32]; // Prefetch filter from GMEM
float4 input_frag_mem[8]; //2*2(2*8/4) Data to do Outer Product + prefetch f. SMEM (double_buffer)
float4 filter_frag_mem[8]; //2*2 Data to do Outer Product + prefetch f. SMEM (double_buffer)
float4 accumulator[2][16] = {0.0f}; // Accumulators
float4 *A_frag; // Input data pointer
int frag_offset = 2* (BC*BN); // (2=8/4) SMEM input read offset
float4 *B_frag; // Filter data pointer
int f_frag_offset = 2* (BC*BK); // (2=8/4) SMEM filter read offset
float4 *input_frag = (float4*) input_frag_mem;
float4 *filter_frag = (float4*) filter_frag_mem;
float4 *swap;
prefetch_input_tile(A, img_tile, in_h, in_w, in_n, tiles_dim, m);
prefetch_filter_tile(B, filter_tile, filt_k);
float4 *input_frag_buffer = (float4*) (input_frag+4);
float4 *filter_frag_buffer = (float4*) (filter_frag+4);
// Mainloop - iterates over the entire K dimension - not unrolled
for(int iter=0; iter<in_c; iter+=BC){ // Current iteration
A_frag = (float4*) (input_smem + threadIdx.y*BC*BN);
B_frag = (float4*) (filter_smem + threadIdx.y*BC*BK);
load_and_transform_input_tile(img_tile, input_smem, in_h, in_w,
tiles_dim, in_c, in_n, tile_size,
tiles_2d_dim, tile_2d_s);
load_filter_tile(filter_tile, filter_smem, filt_c, filt_k);
__syncthreads();
prefetch_input_frag(input_frag, A_frag, frag_offset, access_s[0][threadIdx.x], access_s[1][threadIdx.x]);
prefetch_filter_frag(filter_frag, B_frag, f_frag_offset, access_f_s[0][threadIdx.x], access_f_s[1][threadIdx.x]);
#pragma unroll
for(int i=0; i<BC; i++){
if(i<(BC-1)){
A_frag += BN/4;
B_frag += BK/4;
prefetch_input_frag(input_frag_buffer, A_frag, frag_offset, access_s[0][threadIdx.x], access_s[1][threadIdx.x]);
prefetch_filter_frag(filter_frag_buffer, B_frag, f_frag_offset, access_f_s[0][threadIdx.x], access_f_s[1][threadIdx.x]);
}
outer_product(input_frag, filter_frag, accumulator);
swap = input_frag;
input_frag = input_frag_buffer;
input_frag_buffer = swap;
swap = filter_frag;
filter_frag = filter_frag_buffer;
filter_frag_buffer = swap;
}
A += in_n*BC*in_w*in_h;
B += filt_k*BC*4*4;
if(iter<(in_c-BC)){
prefetch_input_tile(A, img_tile, in_h, in_w, in_n, tiles_dim, m);
prefetch_filter_tile(B, filter_tile, filt_k);
}
__syncthreads();
}
// Transpose, transform and store accumulated result
store_output_tile(accumulator, shared_mem, C, out_h, out_w, tiles_dim, out_n, input_frag_mem, filter_frag_mem, m);
}
hipError_t convolutionForward_32x64x8(float *k, int in_h, int in_w, float *w, int out_h,
int out_w, int out_n, int out_c, float *C, float *Ww,
const unsigned int n,
int tiles_dim, int in_n, int tile_size,
int in_c, int filt_k, int filt_c, int filt_h, int filt_w, int alpha, int m){
int tile_2d_s = tile_size*tile_size;
int tiles_2d_dim = tiles_dim*tiles_dim;
int smem_size = (16*BC*BN + 16*BC*BK)*4;
hipLaunchKernelGGL(( FX), dim3(dim3(filt_k/BK, filt_c/BC)), dim3(dim3(BN, BC)), 0, 0, w, Ww, filt_k, filt_c, filt_h, filt_w, alpha);
#ifdef OPTSTS64_CMP
smem_size = 65536; // 64 KB
hipFuncSetAttribute(Winograd_kernel, hipFuncAttributeMaxDynamicSharedMemorySize, smem_size);
#endif
hipLaunchKernelGGL(( Winograd_kernel), dim3(dim3(in_n/BN, tiles_2d_dim, filt_k/BK)), dim3(dim3(BN, 8)), smem_size, 0, k, Ww, C, tiles_dim, in_c, in_n, in_h, in_w, tile_size, filt_k, filt_c, tiles_2d_dim, out_c, out_n, tile_2d_s, out_h, out_w);
return hipGetLastError();
}
}
#endif
| f83a53f432fd45d4ecda5e59a910332d0526f0ae.cu | // Copyright 2021 Roberto Lopez Castro
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
#include "../FX_m2.cu"
#ifdef OPTLDS64
#include "store_and_transform_output_optLDS64.cuh"
#include "../outer_product.cuh"
#elif OPTSTS64_CMP
#include "store_and_transform_output_optSTS64_compact.cuh"
#include "../outer_product_suffle.cuh"
#else
#include "store_and_transform_output_optSTS64.cuh"
#include "../outer_product_suffle.cuh"
#endif
#ifdef _noWALL_
typedef struct rusage resnfo;
typedef struct _timenfo {
double time;
double systime;
} timenfo;
#define timestamp(sample) getrusage(RUSAGE_SELF, (sample))
#define printtime(t) printf("%15f s (%f user + %f sys) ", \
t.time + t.systime, t.time, t.systime);
#else
typedef struct timeval resnfo;
typedef double timenfo;
#define timestamp(sample) gettimeofday((sample), 0)
#define printtime(t) printf("%15f s ", t);
#endif
#ifndef _WINOGRAD_
#define _WINOGRAD_
extern "C"
{
#define d(input, i, j) ( input[(i<<2) + (j)] )
__device__ __forceinline__ void load_and_transform_input_tile(float *Btd, float *pOutputs, int in_h, int in_w,
int tiles_dim, int in_c, int in_n, int tile_size,
int tiles_2d_dim, int tile_2d_s){
float workspace[3];
#pragma unroll
for(int j=0; j<4; j++){
workspace[0] = Btd[j];
workspace[1] = Btd[j+4];
workspace[2] = Btd[j+8];
Btd[j] = workspace[0] - workspace[2];
Btd[j+4] = workspace[1] + workspace[2];
Btd[j+8] = workspace[2] - workspace[1];
Btd[j+12] = workspace[1] - Btd[j+12];
}
int c_offset = BN*BC;
int c_tensor = threadIdx.y*BN + threadIdx.x;
#pragma unroll
for(int i=0; i<4; i++){ // prefetch 1 input tile/thread
pOutputs[c_tensor+i*c_offset*4] = d(Btd, i, 0) - d(Btd, i, 2);
pOutputs[c_tensor+i*c_offset*4+c_offset] = d(Btd, i, 1) + d(Btd, i, 2);
pOutputs[c_tensor+i*c_offset*4+2*c_offset] = d(Btd, i, 2) - d(Btd, i, 1);
pOutputs[c_tensor+i*c_offset*4+3*c_offset] = d(Btd, i, 1) - d(Btd, i, 3);
}
}
__device__ __forceinline__ void load_filter_tile(float *tiles, float *pOutputs,
int filt_c, int filt_k){
int c_tensor_s = threadIdx.y*BK + threadIdx.x;
int c_offset_s = BK*BC;
for(int k=0; k<2; k++){ // prefetch 2 filter tiles/thread
for(int i=0; i<4; i++){
#pragma unroll
for(int j=0; j<4; j++){
pOutputs[c_tensor_s + i*c_offset_s*4 + j*c_offset_s] = tiles[k*16 + i*4 + j];
}
}
c_tensor_s += BN;
}
}
__device__ __forceinline__ void prefetch_filter_tile(float *pInputs, float *tiles, int filt_k){
int c_tensor = blockIdx.z*BK + (threadIdx.y*filt_k<<4) + threadIdx.x; // Iny*filt_k*4*4
int acumm;
#pragma unroll
for(int i=0; i<4; i++){
acumm = (i*filt_k<<2);
#pragma unroll
for(int j=0; j<4; j++){
tiles[(i<<2) + j] = pInputs[acumm + j*filt_k + c_tensor];
tiles[16 + (i<<2) + j] = pInputs[acumm + j*filt_k + c_tensor+BN];
}
}
}
__device__ __forceinline__ void prefetch_input_tile(float *pInputs, float *tile, int in_h, int in_w, int in_n, int tiles_dim, short mask){
int c_tensor = (blockIdx.y%tiles_dim)*in_n*2 + (blockIdx.y/tiles_dim)*in_n*in_w*2 + blockIdx.x*BN + threadIdx.y*(in_n*in_h*in_w) + (threadIdx.x/in_n)*2*in_n + (threadIdx.x%in_n) - (in_n*in_w+in_n);
int acumm,x;
//short x1,x2;
if(mask==0xFFFF){
#pragma unroll
for(int i=0; i<4; i++){
acumm = i*in_n*in_w;
#pragma unroll
for(int j=0; j<4; j++){
tile[(i<<2) + j] = pInputs[acumm + j*in_n + c_tensor];
}
}
} else {
for(int i=0; i<4; i++){
acumm = i*in_n*in_w;
#pragma unroll
for(int j=0; j<4; j++){
x = (i<<2) + j;
tile[x] = 0;
if(mask&(1<<x))
tile[x]=pInputs[acumm + j*in_n + c_tensor];
}
}
}
}
__device__ __forceinline__ void prefetch_filter_frag(float4 *filter_frag, float4 *B_frag, int f_frag_offset, int offset1, int offset2){
*((float4*) (filter_frag)) = *(B_frag + offset1);
*((float4*) (filter_frag + 1)) = *(B_frag + offset2);
*((float4*) (filter_frag + 2)) = *(B_frag + f_frag_offset + offset1);
*((float4*) (filter_frag + 3)) = *(B_frag + f_frag_offset + offset2);
}
__device__ __forceinline__ void prefetch_input_frag(float4* input_frag, float4 *A_frag, int frag_offset, int offset1, int offset2){
*((float4*) (input_frag)) = *(A_frag + offset1); //ld_shared(A_frag + offset1);
*((float4*) (input_frag + 1)) = *(A_frag + offset2);
*((float4*) (input_frag + 2)) = *(A_frag + frag_offset + offset1);
*((float4*) (input_frag + 3)) = *(A_frag + frag_offset + offset2); //3=2+1
}
__global__ void Winograd_kernel(float *A, float *B, float *C,
int tiles_dim, int in_c, int in_n, int in_h, int in_w,
int tile_size, int filt_k, int filt_c,
int tiles_2d_dim, int out_c, int out_n,
int tile_2d_s, int out_h, int out_w){
extern __shared__ float shared_mem[];
float *input_smem = (float*)shared_mem;
float *filter_smem = (float*)&shared_mem[16*BC*BN];
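  // Validity mask for the 4x4 input tile prefetched below: one bit per tile element
  // (bit (i<<2)+j for row i, column j). Bits are cleared for elements that would fall
  // outside the padded input at the image borders, so prefetch_input_tile zero-fills them.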
short m = 0xFFFF;
if((blockIdx.y/tiles_dim)==0) m&=0xFFF0;
if((blockIdx.y/tiles_dim)==(tiles_dim-1)) m &= (!(in_w%2))?(0x0FFF):(0x00FF);
if(!((blockIdx.y+1)%tiles_dim)) m &= (!(in_w%2))?(0x7777):(0x3333);
if(!((blockIdx.y)%tiles_dim)) m&=0xeeee;
float img_tile[16]; // Prefetch input from GMEM
float filter_tile[32]; // Prefetch filter from GMEM
float4 input_frag_mem[8]; //2*2(2*8/4) Data to do Outer Product + prefetch f. SMEM (double_buffer)
float4 filter_frag_mem[8]; //2*2 Data to do Outer Product + prefetch f. SMEM (double_buffer)
float4 accumulator[2][16] = {0.0f}; // Accumulators
float4 *A_frag; // Input data pointer
int frag_offset = 2* (BC*BN); // (2=8/4) SMEM input read offset
float4 *B_frag; // Filter data pointer
int f_frag_offset = 2* (BC*BK); // (2=8/4) SMEM filter read offset
float4 *input_frag = (float4*) input_frag_mem;
float4 *filter_frag = (float4*) filter_frag_mem;
float4 *swap;
prefetch_input_tile(A, img_tile, in_h, in_w, in_n, tiles_dim, m);
prefetch_filter_tile(B, filter_tile, filt_k);
float4 *input_frag_buffer = (float4*) (input_frag+4);
float4 *filter_frag_buffer = (float4*) (filter_frag+4);
// Mainloop - iterates over the entire K dimension - not unrolled
for(int iter=0; iter<in_c; iter+=BC){ // Current iteration
A_frag = (float4*) (input_smem + threadIdx.y*BC*BN);
B_frag = (float4*) (filter_smem + threadIdx.y*BC*BK);
load_and_transform_input_tile(img_tile, input_smem, in_h, in_w,
tiles_dim, in_c, in_n, tile_size,
tiles_2d_dim, tile_2d_s);
load_filter_tile(filter_tile, filter_smem, filt_c, filt_k);
__syncthreads();
prefetch_input_frag(input_frag, A_frag, frag_offset, access_s[0][threadIdx.x], access_s[1][threadIdx.x]);
prefetch_filter_frag(filter_frag, B_frag, f_frag_offset, access_f_s[0][threadIdx.x], access_f_s[1][threadIdx.x]);
#pragma unroll
for(int i=0; i<BC; i++){
if(i<(BC-1)){
A_frag += BN/4;
B_frag += BK/4;
prefetch_input_frag(input_frag_buffer, A_frag, frag_offset, access_s[0][threadIdx.x], access_s[1][threadIdx.x]);
prefetch_filter_frag(filter_frag_buffer, B_frag, f_frag_offset, access_f_s[0][threadIdx.x], access_f_s[1][threadIdx.x]);
}
outer_product(input_frag, filter_frag, accumulator);
swap = input_frag;
input_frag = input_frag_buffer;
input_frag_buffer = swap;
swap = filter_frag;
filter_frag = filter_frag_buffer;
filter_frag_buffer = swap;
}
A += in_n*BC*in_w*in_h;
B += filt_k*BC*4*4;
if(iter<(in_c-BC)){
prefetch_input_tile(A, img_tile, in_h, in_w, in_n, tiles_dim, m);
prefetch_filter_tile(B, filter_tile, filt_k);
}
__syncthreads();
}
// Transpose, transform and store accumulated result
store_output_tile(accumulator, shared_mem, C, out_h, out_w, tiles_dim, out_n, input_frag_mem, filter_frag_mem, m);
}
cudaError_t convolutionForward_32x64x8(float *k, int in_h, int in_w, float *w, int out_h,
int out_w, int out_n, int out_c, float *C, float *Ww,
const unsigned int n,
int tiles_dim, int in_n, int tile_size,
int in_c, int filt_k, int filt_c, int filt_h, int filt_w, int alpha, int m){
int tile_2d_s = tile_size*tile_size;
int tiles_2d_dim = tiles_dim*tiles_dim;
int smem_size = (16*BC*BN + 16*BC*BK)*4;
FX<<<dim3(filt_k/BK, filt_c/BC), dim3(BN, BC)>>>(w, Ww, filt_k, filt_c, filt_h, filt_w, alpha);
#ifdef OPTSTS64_CMP
smem_size = 65536; // 64 KB
cudaFuncSetAttribute(Winograd_kernel, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_size);
#endif
Winograd_kernel<<<dim3(in_n/BN, tiles_2d_dim, filt_k/BK), dim3(BN, 8), smem_size>>>(k, Ww, C, tiles_dim, in_c, in_n, in_h, in_w, tile_size, filt_k, filt_c, tiles_2d_dim, out_c, out_n, tile_2d_s, out_h, out_w);
return cudaGetLastError();
}
}
#endif
|
33cf9f8089bb982194bfd60f505cd66d84052e24.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <hip/hip_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#define MASK_DIMENSION 3
#define MASK_OFFSET (MASK_DIMENSION / 2)
#define tile_width 8
#define BLOCK_SIZE 8
const char *iname = "lena_bw.pgm";
const char *Imp_type = "simpleTexture";
//__constant__ float const_sharp[9] = {-1,-1,-1,-1,9,-1,-1,-1,-1};
__constant__ float const_edge[9] = {-1,0,1,-2,0,2,-1,0,1};
//__constant__ float const_av[9] = {1/9,1/9,1/9,1/9,1/9,1/9,1/9,1/9,1/9};
__global__ void convolution_shared(float *image, float *output, size_t width){
__shared__ float image_ds[(tile_width + MASK_DIMENSION - 1)*(tile_width + MASK_DIMENSION - 1)];
int s_width = (tile_width + MASK_DIMENSION - 1);
int ty = threadIdx.y;
int tx = threadIdx.x;
int block_y = blockIdx.y;
int block_x = blockIdx.x;
int x = block_x * tile_width + tx;
int y = block_y * tile_width + ty;
int h_I_t = y - MASK_OFFSET;
int h_I_b = y + MASK_OFFSET;
int h_I_l = x - MASK_OFFSET;
int h_I_r = x + MASK_OFFSET;
if (h_I_t < 0 || h_I_l < 0)
image_ds[ty*s_width+tx] = 0;
else
image_ds[ty*s_width+tx] = image[y*width+x - MASK_OFFSET*width - MASK_OFFSET];
if (h_I_r >= width || h_I_t < 0)
image_ds[ty*s_width+(tx+MASK_OFFSET+MASK_OFFSET)] = 0;
else
image_ds[ty*s_width+(tx+MASK_OFFSET+MASK_OFFSET)] = image[y*width+x - MASK_OFFSET*width + MASK_OFFSET];
if (h_I_b >= width || h_I_l < 0)
image_ds[(ty+MASK_OFFSET+MASK_OFFSET)*s_width+tx] = 0;
else
image_ds[(ty+MASK_OFFSET+MASK_OFFSET)*s_width+tx] = image[y*width+x + MASK_OFFSET*width - MASK_OFFSET];
if (h_I_r >= width || h_I_b >= width)
image_ds[(ty+MASK_OFFSET+MASK_OFFSET)*s_width+(tx+MASK_OFFSET+MASK_OFFSET)] = 0;
else
image_ds[(ty+MASK_OFFSET+MASK_OFFSET)*s_width+(tx+MASK_OFFSET+MASK_OFFSET)] = image[y*width+x + MASK_OFFSET*width + MASK_OFFSET];
__syncthreads();
float out = 0;
for (int j = 0; j < MASK_DIMENSION; ++j) {
for (int i = 0; i < MASK_DIMENSION; ++i) {
out += (float)image_ds[(j + ty)*s_width+(i + tx)] * const_edge[j*MASK_DIMENSION+i];
}
}
if(out>0.4 || out<-0.4){
output[y*width+x] = 1;
}
else{
output[y*width+x] = 0;
}
}
int main(int argc, char **argv)
{
printf("%s starting...\n", Imp_type);
int devID = findCudaDevice(argc, (const char **) argv);
float *h_d = NULL;
unsigned int width, height;
char *img_p = sdkFindFilePath(iname, argv[0]);
if (img_p == NULL)
{
printf("Unable to source image file: %s\n", iname);
exit(EXIT_FAILURE);
}
sdkLoadPGM(img_p, &h_d, &width, &height);
unsigned int size = width * height * sizeof(float);
printf("Loaded '%s', %d x %d pixels\n", iname, width, height);
dim3 dimBlock(8, 8, 1);
dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);
float *g_d = 0,*d_d=0;
checkCudaErrors(hipMalloc((void **) &g_d, size));
checkCudaErrors(hipMalloc((void **) &d_d, size));
hipMemcpy(d_d,h_d, size, hipMemcpyHostToDevice);
StopWatchInterface *g_timer = NULL;
sdkCreateTimer(&g_timer);
sdkStartTimer(&g_timer);
//size_t shm_size = BLOCK_SIZE * sizeof(unsigned long long);
hipLaunchKernelGGL(( convolution_shared), dim3(dimGrid),dim3(dimBlock),0, 0, d_d,g_d,width);
getLastCudaError("Kernel execution failed");
checkCudaErrors(hipDeviceSynchronize());
sdkStopTimer(&g_timer);
printf("Processing time for shared: %f (ms)\n", sdkGetTimerValue(&g_timer));
printf("%.2f Mpixels/sec\n",(width *height / (sdkGetTimerValue(&g_timer) / 1000.0f)) / 1e6);
sdkDeleteTimer(&g_timer);
// Allocate mem for the result on host side
float *global_out = (float *) malloc(size);
checkCudaErrors(hipMemcpy(global_out,g_d,size,hipMemcpyDeviceToHost));
// Write result to file
char gl_out[1024];
strcpy(gl_out, img_p);
strcpy(gl_out + strlen(img_p) - 4, "_share_out.pgm");
sdkSavePGM(gl_out, global_out, width, height);
printf("Wrote '%s'\n", gl_out);
free(img_p);
hipDeviceReset();
return 0;
}
| 33cf9f8089bb982194bfd60f505cd66d84052e24.cu |
#include <stdlib.h>
#include <stdio.h>
#include <string.h>
#include <math.h>
#include <cuda_runtime.h>
#include <helper_functions.h>
#include <helper_cuda.h>
#define MASK_DIMENSION 3
#define MASK_OFFSET (MASK_DIMENSION / 2)
#define tile_width 8
#define BLOCK_SIZE 8
const char *iname = "lena_bw.pgm";
const char *Imp_type = "simpleTexture";
//__constant__ float const_sharp[9] = {-1,-1,-1,-1,9,-1,-1,-1,-1};
__constant__ float const_edge[9] = {-1,0,1,-2,0,2,-1,0,1};
//__constant__ float const_av[9] = {1/9,1/9,1/9,1/9,1/9,1/9,1/9,1/9,1/9};
__global__ void convolution_shared(float *image, float *output, size_t width){
__shared__ float image_ds[(tile_width + MASK_DIMENSION - 1)*(tile_width + MASK_DIMENSION - 1)];
int s_width = (tile_width + MASK_DIMENSION - 1);
int ty = threadIdx.y;
int tx = threadIdx.x;
int block_y = blockIdx.y;
int block_x = blockIdx.x;
int x = block_x * tile_width + tx;
int y = block_y * tile_width + ty;
int h_I_t = y - MASK_OFFSET;
int h_I_b = y + MASK_OFFSET;
int h_I_l = x - MASK_OFFSET;
int h_I_r = x + MASK_OFFSET;
if (h_I_t < 0 || h_I_l < 0)
image_ds[ty*s_width+tx] = 0;
else
image_ds[ty*s_width+tx] = image[y*width+x - MASK_OFFSET*width - MASK_OFFSET];
if (h_I_r >= width || h_I_t < 0)
image_ds[ty*s_width+(tx+MASK_OFFSET+MASK_OFFSET)] = 0;
else
image_ds[ty*s_width+(tx+MASK_OFFSET+MASK_OFFSET)] = image[y*width+x - MASK_OFFSET*width + MASK_OFFSET];
if (h_I_b >= width || h_I_l < 0)
image_ds[(ty+MASK_OFFSET+MASK_OFFSET)*s_width+tx] = 0;
else
image_ds[(ty+MASK_OFFSET+MASK_OFFSET)*s_width+tx] = image[y*width+x + MASK_OFFSET*width - MASK_OFFSET];
if (h_I_r >= width || h_I_b >= width)
image_ds[(ty+MASK_OFFSET+MASK_OFFSET)*s_width+(tx+MASK_OFFSET+MASK_OFFSET)] = 0;
else
image_ds[(ty+MASK_OFFSET+MASK_OFFSET)*s_width+(tx+MASK_OFFSET+MASK_OFFSET)] = image[y*width+x + MASK_OFFSET*width + MASK_OFFSET];
__syncthreads();
float out = 0;
for (int j = 0; j < MASK_DIMENSION; ++j) {
for (int i = 0; i < MASK_DIMENSION; ++i) {
out += (float)image_ds[(j + ty)*s_width+(i + tx)] * const_edge[j*MASK_DIMENSION+i];
}
}
if(out>0.4 || out<-0.4){
output[y*width+x] = 1;
}
else{
output[y*width+x] = 0;
}
}
int main(int argc, char **argv)
{
printf("%s starting...\n", Imp_type);
int devID = findCudaDevice(argc, (const char **) argv);
float *h_d = NULL;
unsigned int width, height;
char *img_p = sdkFindFilePath(iname, argv[0]);
if (img_p == NULL)
{
printf("Unable to source image file: %s\n", iname);
exit(EXIT_FAILURE);
}
sdkLoadPGM(img_p, &h_d, &width, &height);
unsigned int size = width * height * sizeof(float);
printf("Loaded '%s', %d x %d pixels\n", iname, width, height);
dim3 dimBlock(8, 8, 1);
dim3 dimGrid(width / dimBlock.x, height / dimBlock.y, 1);
float *g_d = 0,*d_d=0;
checkCudaErrors(cudaMalloc((void **) &g_d, size));
checkCudaErrors(cudaMalloc((void **) &d_d, size));
cudaMemcpy(d_d,h_d, size, cudaMemcpyHostToDevice);
StopWatchInterface *g_timer = NULL;
sdkCreateTimer(&g_timer);
sdkStartTimer(&g_timer);
//size_t shm_size = BLOCK_SIZE * sizeof(unsigned long long);
convolution_shared<<<dimGrid,dimBlock,0>>>(d_d,g_d,width);
getLastCudaError("Kernel execution failed");
checkCudaErrors(cudaDeviceSynchronize());
sdkStopTimer(&g_timer);
printf("Processing time for shared: %f (ms)\n", sdkGetTimerValue(&g_timer));
printf("%.2f Mpixels/sec\n",(width *height / (sdkGetTimerValue(&g_timer) / 1000.0f)) / 1e6);
sdkDeleteTimer(&g_timer);
// Allocate mem for the result on host side
float *global_out = (float *) malloc(size);
checkCudaErrors(cudaMemcpy(global_out,g_d,size,cudaMemcpyDeviceToHost));
// Write result to file
char gl_out[1024];
strcpy(gl_out, img_p);
strcpy(gl_out + strlen(img_p) - 4, "_share_out.pgm");
sdkSavePGM(gl_out, global_out, width, height);
printf("Wrote '%s'\n", gl_out);
free(img_p);
cudaDeviceReset();
return 0;
}
|
e429ffd2b5935baba534dcd9e5b82d3ba7958594.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
hipError_t err = hipGetLastError();
if (hipSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, hipGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
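         * Example: idata = [3, 0, 7, 0]  ->  bools = [1, 0, 1, 0].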
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
bools[index] = (idata[index] != 0);
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
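         * Example: idata = [3, 0, 7], bools = [1, 0, 1], indices = [0, 1, 1]  ->  odata = [3, 7].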
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
// Assume bools initialized with 0s
if (bools[index]) {
odata[indices[index]] = idata[index];
}
}
}
}
| e429ffd2b5935baba534dcd9e5b82d3ba7958594.cu | #include "common.h"
void checkCUDAErrorFn(const char *msg, const char *file, int line) {
cudaError_t err = cudaGetLastError();
if (cudaSuccess == err) {
return;
}
fprintf(stderr, "CUDA error");
if (file) {
fprintf(stderr, " (%s:%d)", file, line);
}
fprintf(stderr, ": %s: %s\n", msg, cudaGetErrorString(err));
exit(EXIT_FAILURE);
}
namespace StreamCompaction {
namespace Common {
/**
* Maps an array to an array of 0s and 1s for stream compaction. Elements
* which map to 0 will be removed, and elements which map to 1 will be kept.
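         * Example: idata = [3, 0, 7, 0]  ->  bools = [1, 0, 1, 0].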
*/
__global__ void kernMapToBoolean(int n, int *bools, const int *idata) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
bools[index] = (idata[index] != 0);
}
/**
* Performs scatter on an array. That is, for each element in idata,
* if bools[idx] == 1, it copies idata[idx] to odata[indices[idx]].
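         * Example: idata = [3, 0, 7], bools = [1, 0, 1], indices = [0, 1, 1]  ->  odata = [3, 7].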
*/
__global__ void kernScatter(int n, int *odata,
const int *idata, const int *bools, const int *indices) {
int index = threadIdx.x + (blockIdx.x * blockDim.x);
if (index >= n) {
return;
}
// Assume bools initialized with 0s
if (bools[index]) {
odata[indices[index]] = idata[index];
}
}
}
}
|
eb16bbcea4d9ce022c5a4fda734d2d2b9da9bf1e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*!
* Copyright (c) 2021 by Contributors
* \file graph/sampling/frequency_hashmap.cu
* \brief frequency hashmap - used to select top-k frequency edges of each node
*/
#include <algorithm>
#include <tuple>
#include <utility>
#include "../../../runtime/cuda/cuda_common.h"
#include "../../../array/cuda/atomic.cuh"
#include "../../../array/cuda/dgl_cub.cuh"
#include "frequency_hashmap.cuh"
namespace dgl {
namespace sampling {
namespace impl {
namespace {
int64_t _table_size(const int64_t num, const int64_t scale) {
/**
* Calculate the number of buckets in the hashtable. To guarantee we can
* fill the hashtable in the worst case, we must use a number of buckets which
* is a power of two.
* https://en.wikipedia.org/wiki/Quadratic_probing#Limitations
*/
const int64_t next_pow2 = 1 << static_cast<int64_t>(1 + std::log2(num >> 1));
return next_pow2 << scale;
}
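// Worked example (illustrative numbers): num = 1000, scale = 3:
//   num >> 1 = 500, 1 + log2(500) ~= 9.97 -> truncated to 9,
//   so next_pow2 = 1 << 9 = 512 and the table gets 512 << 3 = 4096 buckets.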
template<typename IdxType, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _init_edge_table(void *edge_hashmap, int64_t edges_len) {
using EdgeItem = typename DeviceEdgeHashmap<IdxType>::EdgeItem;
auto edge_hashmap_t = static_cast<EdgeItem*>(edge_hashmap);
int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
int64_t last_idx = start_idx + TILE_SIZE;
#pragma unroll(4)
for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
if (idx < edges_len) {
EdgeItem *edge = (edge_hashmap_t + idx);
edge->src = static_cast<IdxType>(-1);
edge->cnt = static_cast<IdxType>(0);
}
}
}
template<typename IdxType, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _count_frequency(const IdxType *src_data,
const int64_t num_edges, const int64_t num_edges_per_node,
IdxType *edge_blocks_prefix, bool *is_first_position,
DeviceEdgeHashmap<IdxType> device_edge_hashmap) {
int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
int64_t last_idx = start_idx + TILE_SIZE;
IdxType count = 0;
for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
if (idx < num_edges) {
IdxType src = src_data[idx];
if (src == static_cast<IdxType>(-1)) {
continue;
}
IdxType dst_idx = (idx / num_edges_per_node);
if (device_edge_hashmap.InsertEdge(src, dst_idx) == 0) {
is_first_position[idx] = true;
++count;
}
}
}
using BlockReduce = typename hipcub::BlockReduce<IdxType, BLOCK_SIZE>;
__shared__ typename BlockReduce::TempStorage temp_space;
count = BlockReduce(temp_space).Sum(count);
if (threadIdx.x == 0) {
edge_blocks_prefix[blockIdx.x] = count;
if (blockIdx.x == 0) {
edge_blocks_prefix[gridDim.x] = 0;
}
}
}
/**
* This structure is used with cub's block-level prefixscan in order to
* keep a running sum as items are iteratively processed.
*/
template <typename T>
struct BlockPrefixCallbackOp {
T _running_total;
__device__ BlockPrefixCallbackOp(const T running_total)
: _running_total(running_total) {}
__device__ T operator()(const T block_aggregate) {
const T old_prefix = _running_total;
_running_total += block_aggregate;
return old_prefix;
}
};
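// Usage note: an instance of this functor (prefix_op in _compact_frequency below) is passed to
// BlockScan::ExclusiveSum so that scan offsets accumulate across successive tile iterations.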
template<typename IdxType, typename Idx64Type, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _compact_frequency(const IdxType *src_data, const IdxType *dst_data,
const int64_t num_edges, const int64_t num_edges_per_node,
const IdxType *edge_blocks_prefix, const bool *is_first_position,
IdxType *num_unique_each_node,
IdxType *unique_src_edges, Idx64Type *unique_frequency,
DeviceEdgeHashmap<IdxType> device_edge_hashmap) {
int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
int64_t last_idx = start_idx + TILE_SIZE;
const IdxType block_offset = edge_blocks_prefix[blockIdx.x];
using BlockScan = typename hipcub::BlockScan<IdxType, BLOCK_SIZE>;
__shared__ typename BlockScan::TempStorage temp_space;
BlockPrefixCallbackOp<IdxType> prefix_op(0);
for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
IdxType flag = 0;
if (idx < num_edges) {
IdxType src = src_data[idx];
IdxType dst_idx = (idx / num_edges_per_node);
if (idx % num_edges_per_node == 0) {
num_unique_each_node[dst_idx] = device_edge_hashmap.GetDstCount(dst_idx);
}
if (is_first_position[idx] == true) {
flag = 1;
}
BlockScan(temp_space).ExclusiveSum(flag, flag, prefix_op);
__syncthreads();
if (is_first_position[idx] == true) {
const IdxType pos = (block_offset + flag);
unique_src_edges[pos] = src;
if (sizeof(IdxType) != sizeof(Idx64Type)
&& sizeof(IdxType) == 4) { // if IdxType is a 32-bit data
unique_frequency[pos] = (
(static_cast<Idx64Type>(num_edges / num_edges_per_node - dst_idx) << 32)
| device_edge_hashmap.GetEdgeCount(src, dst_idx));
} else {
unique_frequency[pos] = device_edge_hashmap.GetEdgeCount(src, dst_idx);
}
}
}
}
}
template<typename IdxType, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _get_pick_num(IdxType *num_unique_each_node,
const int64_t num_pick, const int64_t num_dst_nodes) {
int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
int64_t last_idx = start_idx + TILE_SIZE;
#pragma unroll(4)
for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
if (idx < num_dst_nodes) {
IdxType &num_unique = num_unique_each_node[idx];
num_unique = min(num_unique, static_cast<IdxType>(num_pick));
}
}
}
template<typename IdxType, typename Idx64Type, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _pick_data(const Idx64Type *unique_frequency, const IdxType *unique_src_edges,
const IdxType *unique_input_offsets, const IdxType *dst_data,
const int64_t num_edges_per_node, const int64_t num_dst_nodes,
const int64_t num_edges,
const IdxType *unique_output_offsets,
IdxType *output_src, IdxType *output_dst, IdxType *output_frequency) {
int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
int64_t last_idx = start_idx + TILE_SIZE;
for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
if (idx < num_dst_nodes) {
const int64_t dst_pos = (idx * num_edges_per_node);
assert(dst_pos < num_edges);
const IdxType dst = dst_data[dst_pos];
const IdxType last_output_offset = unique_output_offsets[idx + 1];
assert((last_output_offset - unique_output_offsets[idx]) <=
(unique_input_offsets[idx + 1] - unique_input_offsets[idx]));
for (IdxType output_idx = unique_output_offsets[idx], input_idx = unique_input_offsets[idx];
output_idx < last_output_offset; ++output_idx, ++input_idx) {
output_src[output_idx] = unique_src_edges[input_idx];
output_dst[output_idx] = dst;
output_frequency[output_idx] = static_cast<IdxType>(unique_frequency[input_idx]);
}
}
}
}
} // namespace
// return the old cnt of this edge
template<typename IdxType>
inline __device__ IdxType DeviceEdgeHashmap<IdxType>::InsertEdge(
const IdxType &src, const IdxType &dst_idx) {
IdxType start_off = dst_idx * _num_items_each_dst;
IdxType pos = EdgeHash(src);
IdxType delta = 1;
IdxType old_cnt = static_cast<IdxType>(-1);
while (true) {
IdxType old_src = dgl::aten::cuda::AtomicCAS(
&_edge_hashmap[start_off + pos].src, static_cast<IdxType>(-1), src);
if (old_src == static_cast<IdxType>(-1) || old_src == src) {
// first insert
old_cnt = dgl::aten::cuda::AtomicAdd(
&_edge_hashmap[start_off + pos].cnt, static_cast<IdxType>(1));
if (old_src == static_cast<IdxType>(-1)) {
assert(dst_idx < _num_dst);
dgl::aten::cuda::AtomicAdd(&_dst_unique_edges[dst_idx], static_cast<IdxType>(1));
}
break;
}
pos = EdgeHash(pos + delta);
delta += 1;
}
return old_cnt;
}
template<typename IdxType>
inline __device__ IdxType DeviceEdgeHashmap<IdxType>::GetDstCount(const IdxType &dst_idx) {
return _dst_unique_edges[dst_idx];
}
template<typename IdxType>
inline __device__ IdxType DeviceEdgeHashmap<IdxType>::GetEdgeCount(
const IdxType &src, const IdxType &dst_idx) {
IdxType start_off = dst_idx * _num_items_each_dst;
IdxType pos = EdgeHash(src);
IdxType delta = 1;
while (_edge_hashmap[start_off + pos].src != src) {
pos = EdgeHash(pos + delta);
delta += 1;
}
return _edge_hashmap[start_off + pos].cnt;
}
template <typename IdxType>
FrequencyHashmap<IdxType>::FrequencyHashmap(
int64_t num_dst, int64_t num_items_each_dst, DGLContext ctx,
hipStream_t stream, int64_t edge_table_scale) {
_ctx = ctx;
_stream = stream;
num_items_each_dst = _table_size(num_items_each_dst, edge_table_scale);
auto device = dgl::runtime::DeviceAPI::Get(_ctx);
auto dst_unique_edges = static_cast<IdxType*>(
device->AllocWorkspace(_ctx, (num_dst) * sizeof(IdxType)));
auto edge_hashmap = static_cast<EdgeItem*>(
device->AllocWorkspace(_ctx, (num_dst * num_items_each_dst) * sizeof(EdgeItem)));
constexpr int BLOCK_SIZE = 256;
constexpr int TILE_SIZE = BLOCK_SIZE * 8;
dim3 block(BLOCK_SIZE);
dim3 grid((num_dst * num_items_each_dst + TILE_SIZE - 1) / TILE_SIZE);
hipMemset(dst_unique_edges, 0, (num_dst) * sizeof(IdxType));
hipLaunchKernelGGL(( _init_edge_table<IdxType, BLOCK_SIZE, TILE_SIZE>), dim3(grid), dim3(block), 0, _stream,
edge_hashmap, (num_dst * num_items_each_dst));
_device_edge_hashmap = new DeviceEdgeHashmap<IdxType>(
num_dst, num_items_each_dst, dst_unique_edges, edge_hashmap);
_dst_unique_edges = dst_unique_edges;
_edge_hashmap = edge_hashmap;
}
template <typename IdxType>
FrequencyHashmap<IdxType>::~FrequencyHashmap() {
auto device = dgl::runtime::DeviceAPI::Get(_ctx);
delete _device_edge_hashmap;
_device_edge_hashmap = nullptr;
device->FreeWorkspace(_ctx, _dst_unique_edges);
_dst_unique_edges = nullptr;
device->FreeWorkspace(_ctx, _edge_hashmap);
_edge_hashmap = nullptr;
}
template <typename IdxType>
std::tuple<IdArray, IdArray, IdArray> FrequencyHashmap<IdxType>::Topk(
const IdxType *src_data, const IdxType *dst_data, DLDataType dtype,
const int64_t num_edges, const int64_t num_edges_per_node,
const int64_t num_pick) {
using Idx64Type = int64_t;
const int64_t num_dst_nodes = (num_edges / num_edges_per_node);
constexpr int BLOCK_SIZE = 256;
  // XXX: an empirically tuned value; best performance measured on GV100
constexpr int TILE_SIZE = BLOCK_SIZE * 32;
const dim3 block(BLOCK_SIZE);
const dim3 edges_grid((num_edges + TILE_SIZE - 1) / TILE_SIZE);
auto device = dgl::runtime::DeviceAPI::Get(_ctx);
const IdxType num_edge_blocks = static_cast<IdxType>(edges_grid.x);
IdxType num_unique_edges = 0;
// to mark if this position of edges is the first inserting position for _edge_hashmap
bool *is_first_position = static_cast<bool*>(
device->AllocWorkspace(_ctx, sizeof(bool) * (num_edges)));
CUDA_CALL(hipMemset(is_first_position, 0, sizeof(bool) * (num_edges)));
// double space to use ExclusiveSum
auto edge_blocks_prefix_data = static_cast<IdxType*>(
device->AllocWorkspace(_ctx, 2 * sizeof(IdxType) * (num_edge_blocks + 1)));
IdxType *edge_blocks_prefix = edge_blocks_prefix_data;
IdxType *edge_blocks_prefix_alternate = (edge_blocks_prefix_data + (num_edge_blocks + 1));
// triple space to use ExclusiveSum and unique_output_offsets
auto num_unique_each_node_data = static_cast<IdxType*>(
device->AllocWorkspace(_ctx, 3 * sizeof(IdxType) * (num_dst_nodes + 1)));
IdxType *num_unique_each_node = num_unique_each_node_data;
IdxType *num_unique_each_node_alternate = (num_unique_each_node_data + (num_dst_nodes + 1));
IdxType *unique_output_offsets = (num_unique_each_node_data + 2 * (num_dst_nodes + 1));
// 1. Scan the all edges and count the unique edges and unique edges for each dst node
hipLaunchKernelGGL(( _count_frequency<IdxType, BLOCK_SIZE, TILE_SIZE>), dim3(edges_grid), dim3(block), 0, _stream,
src_data, num_edges, num_edges_per_node,
edge_blocks_prefix, is_first_position, *_device_edge_hashmap);
// 2. Compact the unique edges frequency
// 2.1 ExclusiveSum the edge_blocks_prefix
void *d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
edge_blocks_prefix, edge_blocks_prefix_alternate, num_edge_blocks + 1));
d_temp_storage = device->AllocWorkspace(_ctx, temp_storage_bytes);
CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
edge_blocks_prefix, edge_blocks_prefix_alternate, num_edge_blocks + 1));
device->FreeWorkspace(_ctx, d_temp_storage);
std::swap(edge_blocks_prefix, edge_blocks_prefix_alternate);
device->CopyDataFromTo(&edge_blocks_prefix[num_edge_blocks], 0, &num_unique_edges, 0,
sizeof(num_unique_edges),
_ctx, DGLContext{kDLCPU, 0},
dtype, _stream);
device->StreamSync(_ctx, _stream);
// 2.2 Allocate the data of unique edges and frequency
// double space to use SegmentedRadixSort
auto unique_src_edges_data = static_cast<IdxType*>(
device->AllocWorkspace(_ctx, 2 * sizeof(IdxType) * (num_unique_edges)));
IdxType *unique_src_edges = unique_src_edges_data;
IdxType *unique_src_edges_alternate = unique_src_edges_data + num_unique_edges;
// double space to use SegmentedRadixSort
auto unique_frequency_data = static_cast<Idx64Type*>(
device->AllocWorkspace(_ctx, 2 * sizeof(Idx64Type) * (num_unique_edges)));
Idx64Type *unique_frequency = unique_frequency_data;
Idx64Type *unique_frequency_alternate = unique_frequency_data + num_unique_edges;
// 2.3 Compact the unique edges and their frequency
hipLaunchKernelGGL(( _compact_frequency<IdxType, Idx64Type, BLOCK_SIZE, TILE_SIZE>), dim3(edges_grid), dim3(block), 0, _stream,
src_data, dst_data, num_edges, num_edges_per_node,
edge_blocks_prefix, is_first_position, num_unique_each_node,
unique_src_edges, unique_frequency, *_device_edge_hashmap);
// 3. SegmentedRadixSort the unique edges and unique_frequency
// 3.1 ExclusiveSum the num_unique_each_node
d_temp_storage = nullptr;
temp_storage_bytes = 0;
CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
num_unique_each_node, num_unique_each_node_alternate, num_dst_nodes + 1));
d_temp_storage = device->AllocWorkspace(_ctx, temp_storage_bytes);
CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
num_unique_each_node, num_unique_each_node_alternate, num_dst_nodes + 1));
device->FreeWorkspace(_ctx, d_temp_storage);
// 3.2 SegmentedRadixSort the unique_src_edges and unique_frequency
// Create a set of DoubleBuffers to wrap pairs of device pointers
cub::DoubleBuffer<Idx64Type> d_unique_frequency(unique_frequency, unique_frequency_alternate);
cub::DoubleBuffer<IdxType> d_unique_src_edges(unique_src_edges, unique_src_edges_alternate);
// Determine temporary device storage requirements
d_temp_storage = nullptr;
temp_storage_bytes = 0;
// the DeviceRadixSort is faster than DeviceSegmentedRadixSort,
// especially when num_dst_nodes is large (about ~10000)
if (dtype.bits == 32) {
CUDA_CALL(hipcub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
d_unique_frequency, d_unique_src_edges, num_unique_edges));
} else {
CUDA_CALL(hipcub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
d_unique_frequency, d_unique_src_edges, num_unique_edges, num_dst_nodes,
num_unique_each_node_alternate, num_unique_each_node_alternate + 1));
}
d_temp_storage = device->AllocWorkspace(_ctx, temp_storage_bytes);
if (dtype.bits == 32) {
CUDA_CALL(hipcub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
d_unique_frequency, d_unique_src_edges, num_unique_edges));
} else {
CUDA_CALL(hipcub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
d_unique_frequency, d_unique_src_edges, num_unique_edges, num_dst_nodes,
num_unique_each_node_alternate, num_unique_each_node_alternate + 1));
}
device->FreeWorkspace(_ctx, d_temp_storage);
// 4. Get the final pick number for each dst node
// 4.1 Reset the min(num_pick, num_unique_each_node) to num_unique_each_node
constexpr int NODE_TILE_SIZE = BLOCK_SIZE * 2;
const dim3 nodes_grid((num_dst_nodes + NODE_TILE_SIZE - 1) / NODE_TILE_SIZE);
hipLaunchKernelGGL(( _get_pick_num<IdxType, BLOCK_SIZE, NODE_TILE_SIZE>), dim3(nodes_grid), dim3(block), 0, _stream,
num_unique_each_node, num_pick, num_dst_nodes);
// 4.2 ExclusiveSum the new num_unique_each_node as unique_output_offsets
// use unique_output_offsets;
d_temp_storage = nullptr;
temp_storage_bytes = 0;
CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
num_unique_each_node, unique_output_offsets, num_dst_nodes + 1));
d_temp_storage = device->AllocWorkspace(_ctx, temp_storage_bytes);
CUDA_CALL(hipcub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
num_unique_each_node, unique_output_offsets, num_dst_nodes + 1));
device->FreeWorkspace(_ctx, d_temp_storage);
// 5. Pick the data to result
IdxType num_output = 0;
device->CopyDataFromTo(&unique_output_offsets[num_dst_nodes], 0, &num_output, 0,
sizeof(num_output),
_ctx, DGLContext{kDLCPU, 0},
dtype, _stream);
device->StreamSync(_ctx, _stream);
IdArray res_src = IdArray::Empty({static_cast<int64_t>(num_output)},
dtype, _ctx);
IdArray res_dst = IdArray::Empty({static_cast<int64_t>(num_output)},
dtype, _ctx);
IdArray res_cnt = IdArray::Empty({static_cast<int64_t>(num_output)},
dtype, _ctx);
hipLaunchKernelGGL(( _pick_data<IdxType, Idx64Type, BLOCK_SIZE, NODE_TILE_SIZE>), dim3(nodes_grid), dim3(block), 0, _stream,
d_unique_frequency.Current(), d_unique_src_edges.Current(), num_unique_each_node_alternate,
dst_data, num_edges_per_node, num_dst_nodes, num_edges,
unique_output_offsets,
res_src.Ptr<IdxType>(), res_dst.Ptr<IdxType>(), res_cnt.Ptr<IdxType>());
device->FreeWorkspace(_ctx, is_first_position);
device->FreeWorkspace(_ctx, edge_blocks_prefix_data);
device->FreeWorkspace(_ctx, num_unique_each_node_data);
device->FreeWorkspace(_ctx, unique_src_edges_data);
device->FreeWorkspace(_ctx, unique_frequency_data);
return std::make_tuple(res_src, res_dst, res_cnt);
}
template
class FrequencyHashmap<int64_t>;
template
class FrequencyHashmap<int32_t>;
}; // namespace impl
}; // namespace sampling
}; // namespace dgl
| eb16bbcea4d9ce022c5a4fda734d2d2b9da9bf1e.cu | /*!
* Copyright (c) 2021 by Contributors
* \file graph/sampling/frequency_hashmap.cu
* \brief frequency hashmap - used to select top-k frequency edges of each node
*/
#include <algorithm>
#include <tuple>
#include <utility>
#include "../../../runtime/cuda/cuda_common.h"
#include "../../../array/cuda/atomic.cuh"
#include "../../../array/cuda/dgl_cub.cuh"
#include "frequency_hashmap.cuh"
namespace dgl {
namespace sampling {
namespace impl {
namespace {
int64_t _table_size(const int64_t num, const int64_t scale) {
/**
* Calculate the number of buckets in the hashtable. To guarantee we can
* fill the hashtable in the worst case, we must use a number of buckets which
* is a power of two.
* https://en.wikipedia.org/wiki/Quadratic_probing#Limitations
*/
const int64_t next_pow2 = 1 << static_cast<int64_t>(1 + std::log2(num >> 1));
return next_pow2 << scale;
}
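// Worked example (illustrative numbers): num = 1000, scale = 3:
//   num >> 1 = 500, 1 + log2(500) ~= 9.97 -> truncated to 9,
//   so next_pow2 = 1 << 9 = 512 and the table gets 512 << 3 = 4096 buckets.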
template<typename IdxType, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _init_edge_table(void *edge_hashmap, int64_t edges_len) {
using EdgeItem = typename DeviceEdgeHashmap<IdxType>::EdgeItem;
auto edge_hashmap_t = static_cast<EdgeItem*>(edge_hashmap);
int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
int64_t last_idx = start_idx + TILE_SIZE;
#pragma unroll(4)
for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
if (idx < edges_len) {
EdgeItem *edge = (edge_hashmap_t + idx);
edge->src = static_cast<IdxType>(-1);
edge->cnt = static_cast<IdxType>(0);
}
}
}
template<typename IdxType, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _count_frequency(const IdxType *src_data,
const int64_t num_edges, const int64_t num_edges_per_node,
IdxType *edge_blocks_prefix, bool *is_first_position,
DeviceEdgeHashmap<IdxType> device_edge_hashmap) {
int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
int64_t last_idx = start_idx + TILE_SIZE;
IdxType count = 0;
for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
if (idx < num_edges) {
IdxType src = src_data[idx];
if (src == static_cast<IdxType>(-1)) {
continue;
}
IdxType dst_idx = (idx / num_edges_per_node);
if (device_edge_hashmap.InsertEdge(src, dst_idx) == 0) {
is_first_position[idx] = true;
++count;
}
}
}
using BlockReduce = typename cub::BlockReduce<IdxType, BLOCK_SIZE>;
__shared__ typename BlockReduce::TempStorage temp_space;
count = BlockReduce(temp_space).Sum(count);
if (threadIdx.x == 0) {
edge_blocks_prefix[blockIdx.x] = count;
if (blockIdx.x == 0) {
edge_blocks_prefix[gridDim.x] = 0;
}
}
}
/**
* This structure is used with cub's block-level prefixscan in order to
* keep a running sum as items are iteratively processed.
*/
template <typename T>
struct BlockPrefixCallbackOp {
T _running_total;
__device__ BlockPrefixCallbackOp(const T running_total)
: _running_total(running_total) {}
__device__ T operator()(const T block_aggregate) {
const T old_prefix = _running_total;
_running_total += block_aggregate;
return old_prefix;
}
};
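// Usage note: an instance of this functor (prefix_op in _compact_frequency below) is passed to
// BlockScan::ExclusiveSum so that scan offsets accumulate across successive tile iterations.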
template<typename IdxType, typename Idx64Type, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _compact_frequency(const IdxType *src_data, const IdxType *dst_data,
const int64_t num_edges, const int64_t num_edges_per_node,
const IdxType *edge_blocks_prefix, const bool *is_first_position,
IdxType *num_unique_each_node,
IdxType *unique_src_edges, Idx64Type *unique_frequency,
DeviceEdgeHashmap<IdxType> device_edge_hashmap) {
int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
int64_t last_idx = start_idx + TILE_SIZE;
const IdxType block_offset = edge_blocks_prefix[blockIdx.x];
using BlockScan = typename cub::BlockScan<IdxType, BLOCK_SIZE>;
__shared__ typename BlockScan::TempStorage temp_space;
BlockPrefixCallbackOp<IdxType> prefix_op(0);
for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
IdxType flag = 0;
if (idx < num_edges) {
IdxType src = src_data[idx];
IdxType dst_idx = (idx / num_edges_per_node);
if (idx % num_edges_per_node == 0) {
num_unique_each_node[dst_idx] = device_edge_hashmap.GetDstCount(dst_idx);
}
if (is_first_position[idx] == true) {
flag = 1;
}
BlockScan(temp_space).ExclusiveSum(flag, flag, prefix_op);
__syncthreads();
if (is_first_position[idx] == true) {
const IdxType pos = (block_offset + flag);
unique_src_edges[pos] = src;
if (sizeof(IdxType) != sizeof(Idx64Type)
&& sizeof(IdxType) == 4) { // if IdxType is a 32-bit data
unique_frequency[pos] = (
(static_cast<Idx64Type>(num_edges / num_edges_per_node - dst_idx) << 32)
| device_edge_hashmap.GetEdgeCount(src, dst_idx));
} else {
unique_frequency[pos] = device_edge_hashmap.GetEdgeCount(src, dst_idx);
}
}
}
}
}
template<typename IdxType, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _get_pick_num(IdxType *num_unique_each_node,
const int64_t num_pick, const int64_t num_dst_nodes) {
int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
int64_t last_idx = start_idx + TILE_SIZE;
#pragma unroll(4)
for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
if (idx < num_dst_nodes) {
IdxType &num_unique = num_unique_each_node[idx];
num_unique = min(num_unique, static_cast<IdxType>(num_pick));
}
}
}
template<typename IdxType, typename Idx64Type, int BLOCK_SIZE, int TILE_SIZE>
__global__ void _pick_data(const Idx64Type *unique_frequency, const IdxType *unique_src_edges,
const IdxType *unique_input_offsets, const IdxType *dst_data,
const int64_t num_edges_per_node, const int64_t num_dst_nodes,
const int64_t num_edges,
const IdxType *unique_output_offsets,
IdxType *output_src, IdxType *output_dst, IdxType *output_frequency) {
int64_t start_idx = (blockIdx.x * TILE_SIZE) + threadIdx.x;
int64_t last_idx = start_idx + TILE_SIZE;
for (int64_t idx = start_idx; idx < last_idx; idx += BLOCK_SIZE) {
if (idx < num_dst_nodes) {
const int64_t dst_pos = (idx * num_edges_per_node);
assert(dst_pos < num_edges);
const IdxType dst = dst_data[dst_pos];
const IdxType last_output_offset = unique_output_offsets[idx + 1];
assert((last_output_offset - unique_output_offsets[idx]) <=
(unique_input_offsets[idx + 1] - unique_input_offsets[idx]));
for (IdxType output_idx = unique_output_offsets[idx], input_idx = unique_input_offsets[idx];
output_idx < last_output_offset; ++output_idx, ++input_idx) {
output_src[output_idx] = unique_src_edges[input_idx];
output_dst[output_idx] = dst;
output_frequency[output_idx] = static_cast<IdxType>(unique_frequency[input_idx]);
}
}
}
}
} // namespace
// return the old cnt of this edge
template<typename IdxType>
inline __device__ IdxType DeviceEdgeHashmap<IdxType>::InsertEdge(
const IdxType &src, const IdxType &dst_idx) {
IdxType start_off = dst_idx * _num_items_each_dst;
IdxType pos = EdgeHash(src);
IdxType delta = 1;
IdxType old_cnt = static_cast<IdxType>(-1);
while (true) {
IdxType old_src = dgl::aten::cuda::AtomicCAS(
&_edge_hashmap[start_off + pos].src, static_cast<IdxType>(-1), src);
if (old_src == static_cast<IdxType>(-1) || old_src == src) {
// first insert
old_cnt = dgl::aten::cuda::AtomicAdd(
&_edge_hashmap[start_off + pos].cnt, static_cast<IdxType>(1));
if (old_src == static_cast<IdxType>(-1)) {
assert(dst_idx < _num_dst);
dgl::aten::cuda::AtomicAdd(&_dst_unique_edges[dst_idx], static_cast<IdxType>(1));
}
break;
}
pos = EdgeHash(pos + delta);
delta += 1;
}
return old_cnt;
}
template<typename IdxType>
inline __device__ IdxType DeviceEdgeHashmap<IdxType>::GetDstCount(const IdxType &dst_idx) {
return _dst_unique_edges[dst_idx];
}
template<typename IdxType>
inline __device__ IdxType DeviceEdgeHashmap<IdxType>::GetEdgeCount(
const IdxType &src, const IdxType &dst_idx) {
IdxType start_off = dst_idx * _num_items_each_dst;
IdxType pos = EdgeHash(src);
IdxType delta = 1;
while (_edge_hashmap[start_off + pos].src != src) {
pos = EdgeHash(pos + delta);
delta += 1;
}
return _edge_hashmap[start_off + pos].cnt;
}
template <typename IdxType>
FrequencyHashmap<IdxType>::FrequencyHashmap(
int64_t num_dst, int64_t num_items_each_dst, DGLContext ctx,
cudaStream_t stream, int64_t edge_table_scale) {
_ctx = ctx;
_stream = stream;
num_items_each_dst = _table_size(num_items_each_dst, edge_table_scale);
auto device = dgl::runtime::DeviceAPI::Get(_ctx);
auto dst_unique_edges = static_cast<IdxType*>(
device->AllocWorkspace(_ctx, (num_dst) * sizeof(IdxType)));
auto edge_hashmap = static_cast<EdgeItem*>(
device->AllocWorkspace(_ctx, (num_dst * num_items_each_dst) * sizeof(EdgeItem)));
constexpr int BLOCK_SIZE = 256;
constexpr int TILE_SIZE = BLOCK_SIZE * 8;
dim3 block(BLOCK_SIZE);
dim3 grid((num_dst * num_items_each_dst + TILE_SIZE - 1) / TILE_SIZE);
cudaMemset(dst_unique_edges, 0, (num_dst) * sizeof(IdxType));
_init_edge_table<IdxType, BLOCK_SIZE, TILE_SIZE><<<grid, block, 0, _stream>>>(
edge_hashmap, (num_dst * num_items_each_dst));
_device_edge_hashmap = new DeviceEdgeHashmap<IdxType>(
num_dst, num_items_each_dst, dst_unique_edges, edge_hashmap);
_dst_unique_edges = dst_unique_edges;
_edge_hashmap = edge_hashmap;
}
template <typename IdxType>
FrequencyHashmap<IdxType>::~FrequencyHashmap() {
auto device = dgl::runtime::DeviceAPI::Get(_ctx);
delete _device_edge_hashmap;
_device_edge_hashmap = nullptr;
device->FreeWorkspace(_ctx, _dst_unique_edges);
_dst_unique_edges = nullptr;
device->FreeWorkspace(_ctx, _edge_hashmap);
_edge_hashmap = nullptr;
}
template <typename IdxType>
std::tuple<IdArray, IdArray, IdArray> FrequencyHashmap<IdxType>::Topk(
const IdxType *src_data, const IdxType *dst_data, DLDataType dtype,
const int64_t num_edges, const int64_t num_edges_per_node,
const int64_t num_pick) {
using Idx64Type = int64_t;
const int64_t num_dst_nodes = (num_edges / num_edges_per_node);
constexpr int BLOCK_SIZE = 256;
  // XXX: an empirically tuned value; best performance measured on GV100
constexpr int TILE_SIZE = BLOCK_SIZE * 32;
const dim3 block(BLOCK_SIZE);
const dim3 edges_grid((num_edges + TILE_SIZE - 1) / TILE_SIZE);
auto device = dgl::runtime::DeviceAPI::Get(_ctx);
const IdxType num_edge_blocks = static_cast<IdxType>(edges_grid.x);
IdxType num_unique_edges = 0;
// marks whether an edge position is the first insertion of that edge into _edge_hashmap
bool *is_first_position = static_cast<bool*>(
device->AllocWorkspace(_ctx, sizeof(bool) * (num_edges)));
CUDA_CALL(cudaMemset(is_first_position, 0, sizeof(bool) * (num_edges)));
// double space to use ExclusiveSum
auto edge_blocks_prefix_data = static_cast<IdxType*>(
device->AllocWorkspace(_ctx, 2 * sizeof(IdxType) * (num_edge_blocks + 1)));
IdxType *edge_blocks_prefix = edge_blocks_prefix_data;
IdxType *edge_blocks_prefix_alternate = (edge_blocks_prefix_data + (num_edge_blocks + 1));
// triple space to use ExclusiveSum and unique_output_offsets
auto num_unique_each_node_data = static_cast<IdxType*>(
device->AllocWorkspace(_ctx, 3 * sizeof(IdxType) * (num_dst_nodes + 1)));
IdxType *num_unique_each_node = num_unique_each_node_data;
IdxType *num_unique_each_node_alternate = (num_unique_each_node_data + (num_dst_nodes + 1));
IdxType *unique_output_offsets = (num_unique_each_node_data + 2 * (num_dst_nodes + 1));
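// Overall flow: (1) hash-count edge frequencies, (2) compact the unique edges,
// (3) sort each dst segment by frequency, (4) clamp each segment to num_pick,
// (5) gather the top-k edges, dst ids and counts into the output arrays.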
// 1. Scan all edges and count the unique edges per edge block and per dst node
_count_frequency<IdxType, BLOCK_SIZE, TILE_SIZE><<<edges_grid, block, 0, _stream>>>(
src_data, num_edges, num_edges_per_node,
edge_blocks_prefix, is_first_position, *_device_edge_hashmap);
// 2. Compact the unique edges frequency
// 2.1 ExclusiveSum the edge_blocks_prefix
void *d_temp_storage = nullptr;
size_t temp_storage_bytes = 0;
CUDA_CALL(cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
edge_blocks_prefix, edge_blocks_prefix_alternate, num_edge_blocks + 1));
d_temp_storage = device->AllocWorkspace(_ctx, temp_storage_bytes);
CUDA_CALL(cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
edge_blocks_prefix, edge_blocks_prefix_alternate, num_edge_blocks + 1));
device->FreeWorkspace(_ctx, d_temp_storage);
std::swap(edge_blocks_prefix, edge_blocks_prefix_alternate);
device->CopyDataFromTo(&edge_blocks_prefix[num_edge_blocks], 0, &num_unique_edges, 0,
sizeof(num_unique_edges),
_ctx, DGLContext{kDLCPU, 0},
dtype, _stream);
device->StreamSync(_ctx, _stream);
// 2.2 Allocate the data of unique edges and frequency
// double space to use SegmentedRadixSort
auto unique_src_edges_data = static_cast<IdxType*>(
device->AllocWorkspace(_ctx, 2 * sizeof(IdxType) * (num_unique_edges)));
IdxType *unique_src_edges = unique_src_edges_data;
IdxType *unique_src_edges_alternate = unique_src_edges_data + num_unique_edges;
// double space to use SegmentedRadixSort
auto unique_frequency_data = static_cast<Idx64Type*>(
device->AllocWorkspace(_ctx, 2 * sizeof(Idx64Type) * (num_unique_edges)));
Idx64Type *unique_frequency = unique_frequency_data;
Idx64Type *unique_frequency_alternate = unique_frequency_data + num_unique_edges;
// 2.3 Compact the unique edges and their frequency
_compact_frequency<IdxType, Idx64Type, BLOCK_SIZE, TILE_SIZE><<<edges_grid, block, 0, _stream>>>(
src_data, dst_data, num_edges, num_edges_per_node,
edge_blocks_prefix, is_first_position, num_unique_each_node,
unique_src_edges, unique_frequency, *_device_edge_hashmap);
// 3. SegmentedRadixSort the unique edges and unique_frequency
// 3.1 ExclusiveSum the num_unique_each_node
d_temp_storage = nullptr;
temp_storage_bytes = 0;
CUDA_CALL(cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
num_unique_each_node, num_unique_each_node_alternate, num_dst_nodes + 1));
d_temp_storage = device->AllocWorkspace(_ctx, temp_storage_bytes);
CUDA_CALL(cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
num_unique_each_node, num_unique_each_node_alternate, num_dst_nodes + 1));
device->FreeWorkspace(_ctx, d_temp_storage);
// 3.2 SegmentedRadixSort the unique_src_edges and unique_frequency
// Create a set of DoubleBuffers to wrap pairs of device pointers
cub::DoubleBuffer<Idx64Type> d_unique_frequency(unique_frequency, unique_frequency_alternate);
cub::DoubleBuffer<IdxType> d_unique_src_edges(unique_src_edges, unique_src_edges_alternate);
// Determine temporary device storage requirements
d_temp_storage = nullptr;
temp_storage_bytes = 0;
// DeviceRadixSort is faster than DeviceSegmentedRadixSort,
// especially when num_dst_nodes is large (around 10000)
if (dtype.bits == 32) {
CUDA_CALL(cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
d_unique_frequency, d_unique_src_edges, num_unique_edges));
} else {
CUDA_CALL(cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
d_unique_frequency, d_unique_src_edges, num_unique_edges, num_dst_nodes,
num_unique_each_node_alternate, num_unique_each_node_alternate + 1));
}
d_temp_storage = device->AllocWorkspace(_ctx, temp_storage_bytes);
if (dtype.bits == 32) {
CUDA_CALL(cub::DeviceRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
d_unique_frequency, d_unique_src_edges, num_unique_edges));
} else {
CUDA_CALL(cub::DeviceSegmentedRadixSort::SortPairsDescending(d_temp_storage, temp_storage_bytes,
d_unique_frequency, d_unique_src_edges, num_unique_edges, num_dst_nodes,
num_unique_each_node_alternate, num_unique_each_node_alternate + 1));
}
device->FreeWorkspace(_ctx, d_temp_storage);
// 4. Get the final pick number for each dst node
// 4.1 Clamp num_unique_each_node to min(num_pick, num_unique_each_node)
constexpr int NODE_TILE_SIZE = BLOCK_SIZE * 2;
const dim3 nodes_grid((num_dst_nodes + NODE_TILE_SIZE - 1) / NODE_TILE_SIZE);
_get_pick_num<IdxType, BLOCK_SIZE, NODE_TILE_SIZE><<<nodes_grid, block, 0, _stream>>>(
num_unique_each_node, num_pick, num_dst_nodes);
// 4.2 ExclusiveSum the new num_unique_each_node as unique_output_offsets
// use unique_output_offsets;
d_temp_storage = nullptr;
temp_storage_bytes = 0;
CUDA_CALL(cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
num_unique_each_node, unique_output_offsets, num_dst_nodes + 1));
d_temp_storage = device->AllocWorkspace(_ctx, temp_storage_bytes);
CUDA_CALL(cub::DeviceScan::ExclusiveSum(d_temp_storage, temp_storage_bytes,
num_unique_each_node, unique_output_offsets, num_dst_nodes + 1));
device->FreeWorkspace(_ctx, d_temp_storage);
// 5. Pick the data to result
IdxType num_output = 0;
device->CopyDataFromTo(&unique_output_offsets[num_dst_nodes], 0, &num_output, 0,
sizeof(num_output),
_ctx, DGLContext{kDLCPU, 0},
dtype, _stream);
device->StreamSync(_ctx, _stream);
IdArray res_src = IdArray::Empty({static_cast<int64_t>(num_output)},
dtype, _ctx);
IdArray res_dst = IdArray::Empty({static_cast<int64_t>(num_output)},
dtype, _ctx);
IdArray res_cnt = IdArray::Empty({static_cast<int64_t>(num_output)},
dtype, _ctx);
_pick_data<IdxType, Idx64Type, BLOCK_SIZE, NODE_TILE_SIZE><<<nodes_grid, block, 0, _stream>>>(
d_unique_frequency.Current(), d_unique_src_edges.Current(), num_unique_each_node_alternate,
dst_data, num_edges_per_node, num_dst_nodes, num_edges,
unique_output_offsets,
res_src.Ptr<IdxType>(), res_dst.Ptr<IdxType>(), res_cnt.Ptr<IdxType>());
device->FreeWorkspace(_ctx, is_first_position);
device->FreeWorkspace(_ctx, edge_blocks_prefix_data);
device->FreeWorkspace(_ctx, num_unique_each_node_data);
device->FreeWorkspace(_ctx, unique_src_edges_data);
device->FreeWorkspace(_ctx, unique_frequency_data);
return std::make_tuple(res_src, res_dst, res_cnt);
}
template
class FrequencyHashmap<int64_t>;
template
class FrequencyHashmap<int32_t>;
}; // namespace impl
}; // namespace sampling
}; // namespace dgl
|
18679c9ef1fab0f4306d253b5d5acd7f7646e964.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
__global__ void bcast(int arg) {
int laneId = threadIdx.x & 0x1f;
int value;
if (laneId == 0) // lane 0 starts with arg
value = arg;
else { // every other lane starts with arg + 3
value = arg + 3;
}
value = __shfl_sync(0xffffffff, value, 4); // Synchronize all threads in warp, and get "value" from lane 4
// if (value != arg)
// printf("Thread %d failed.\n", threadIdx.x);
// else {
printf("%d %d\n", threadIdx.x, value);
//}
}
__global__ void reduce() {
int laneId = threadIdx.x & 0x1f;
int value = laneId;
for ( int offset = 32 / 2 ; offset > 0 ; offset /= 2) {
value += __shfl_down_sync(0xffffffff, value, offset); // add the value held by the lane "offset" positions higher in the warp
}
printf("%d %d\n", threadIdx.x, value);
}
__global__ void reduce1() {
int laneId = threadIdx.x & 0x1f;
int value = laneId;
value = __shfl_down_sync(0xffffffff, value, 16);
printf("%d %d\n", threadIdx.x, value);
}
__global__ void reduce2() {
int laneId = threadIdx.x & 0x1f;
int value = laneId;
for ( int offset = 32 / 2 ; offset > 0 ; offset /= 2) {
value += __shfl_xor_sync(0xffffffff, value, offset); // butterfly exchange: add the value from the lane with id laneId XOR offset
}
printf("%d %d\n", threadIdx.x, value);
}
int main() {
hipLaunchKernelGGL(( reduce2), dim3(1), dim3(64) , 0, 0, );
hipDeviceSynchronize();
return 0;
} | 18679c9ef1fab0f4306d253b5d5acd7f7646e964.cu | #include <stdio.h>
#include <cuda_runtime.h>
__global__ void bcast(int arg) {
int laneId = threadIdx.x & 0x1f;
int value;
if (laneId == 0) // lane 0 starts with arg
value = arg;
else { // every other lane starts with arg + 3
value = arg + 3;
}
value = __shfl_sync(0xffffffff, value, 4); // Synchronize all threads in warp, and get "value" from lane 4
// if (value != arg)
// printf("Thread %d failed.\n", threadIdx.x);
// else {
printf("%d %d\n", threadIdx.x, value);
//}
}
__global__ void reduce() {
int laneId = threadIdx.x & 0x1f;
int value = laneId;
for ( int offset = 32 / 2 ; offset > 0 ; offset /= 2) {
value += __shfl_down_sync(0xffffffff, value, offset); // add the value held by the lane "offset" positions higher in the warp
}
printf("%d %d\n", threadIdx.x, value);
}
__global__ void reduce1() {
int laneId = threadIdx.x & 0x1f;
int value = laneId;
value = __shfl_down_sync(0xffffffff, value, 16);
printf("%d %d\n", threadIdx.x, value);
}
__global__ void reduce2() {
int laneId = threadIdx.x & 0x1f;
int value = laneId;
for ( int offset = 32 / 2 ; offset > 0 ; offset /= 2) {
value += __shfl_xor_sync(0xffffffff, value, offset); // butterfly exchange: add the value from the lane with id laneId XOR offset
}
printf("%d %d\n", threadIdx.x, value);
}
int main() {
reduce2<<< 1, 64 >>>();
cudaDeviceSynchronize();
return 0;
} |
038190a01960d90ce9dd990af10d9bd5f429165e.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <hipsparse.h>
#include "tridiagonal.h"
template<typename val_t>
__global__ void Thomas_kernel(val_t *a_gbl, val_t *b_gbl, val_t *c_gbl, val_t *d_gbl, int n_eqt, int n_batch) {
int task_id = blockIdx.x * blockDim.x + threadIdx.x;
if(task_id < n_batch){
int i;
val_t *a = a_gbl + task_id*n_eqt;
val_t *b = b_gbl + task_id*n_eqt;
val_t *c = c_gbl + task_id*n_eqt;
val_t *r = d_gbl + task_id*n_eqt;
val_t cc = c[0]/b[0];
val_t rr = r[0]/b[0];
c[0] = cc;
r[0] = rr;
for(i = 1; i < n_eqt; i ++) {
val_t k = (b[i] - cc*a[i]);
cc = c[i] / k;
rr = (r[i] - rr*a[i]) / k;
c[i] = cc;
r[i] = rr;
}
for(i = n_eqt-2; i >= 0; i --) {
rr = r[i] - c[i] * rr;
r[i] = rr;
}
}
}
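// Cyclic reduction: one block per system (n_batch blocks). Each forward sweep
// combines neighbouring rows to halve the system, writing the reduced system
// into `buffer` (4 * n_eqt * n_batch scratch values). Once fewer than
// 2 * blockDim.x equations remain, thread 0 solves the rest with Thomas, and
// the backward sweeps substitute the results back into the larger systems.
// length_record keeps the system size for each of up to 32 reduction levels.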
template<typename val_t>
__global__ void CR_kernel(val_t *a_gbl, val_t *b_gbl, val_t *c_gbl, val_t *d_gbl, val_t *buffer, int n_eqt, int n_batch) {
const int tid = threadIdx.x;
const int tnum = blockDim.x;
const int bid = blockIdx.x;
val_t *a = a_gbl + bid*n_eqt;
val_t *b = b_gbl + bid*n_eqt;
val_t *c = c_gbl + bid*n_eqt;
val_t *d = d_gbl + bid*n_eqt;
val_t *a1 = a;
val_t *b1 = b;
val_t *c1 = c;
val_t *d1 = d;
val_t *a2 = buffer + 0*n_eqt*n_batch + bid*n_eqt;
val_t *b2 = buffer + 1*n_eqt*n_batch + bid*n_eqt;
val_t *c2 = buffer + 2*n_eqt*n_batch + bid*n_eqt;
val_t *d2 = buffer + 3*n_eqt*n_batch + bid*n_eqt;
__shared__ int length_record[32];
int iter = 0;
if(tid==0) length_record[0] = n_eqt;
__syncthreads();
//CR Forward
while(length_record[iter] >= 2*tnum) {
int n = length_record[iter]/2;
if(length_record[iter]%2)
{
int i = tid;
for(i = tid; i < n; i += tnum) {
val_t k1 = a1[2*i+1]/b1[2*i-0];
val_t k2 = c1[2*i+1]/b1[2*i+2];
a2[i] = -a1[2*i-0] * k1;
b2[i] = b1[2*i+1] - c1[2*i-0]*k1 - a1[2*i+2]*k2;
c2[i] = -c1[2*i+2] * k2;
d2[i] = d1[2*i+1] - d1[2*i-0]*k1 - d1[2*i+2]*k2;
}
} else {
int i = tid;
{
val_t k1 = a1[2*i]/((tid==0)?1:b1[2*i-1]);
val_t k2 = c1[2*i]/b1[2*i+1];
a2[i] = -((tid==0)?0:a1[2*i-1]) * k1;
b2[i] = b1[2*i] - ((tid==0)?0:c1[2*i-1])*k1 - a1[2*i+1]*k2;
c2[i] = -c1[2*i+1] * k2;
d2[i] = d1[2*i] - ((tid==0)?0:d1[2*i-1])*k1 - d1[2*i+1]*k2;
}
for(i = tid+tnum; i < n; i += tnum) {
val_t k1 = a1[2*i]/b1[2*i-1];
val_t k2 = c1[2*i]/b1[2*i+1];
a2[i] = -a1[2*i-1] * k1;
b2[i] = b1[2*i] - c1[2*i-1]*k1 - a1[2*i+1]*k2;
c2[i] = -c1[2*i+1] * k2;
d2[i] = d1[2*i] - d1[2*i-1]*k1 - d1[2*i+1]*k2;
}
}
a1 = a2;
b1 = b2;
c1 = c2;
d1 = d2;
a2 = a2 + n;
b2 = b2 + n;
c2 = c2 + n;
d2 = d2 + n;
++iter;
if(tid == 0)length_record[iter] = n;
__syncthreads();
}
//Thomas
if(tid == 0) {
int n = length_record[iter];
c1[0] = c1[0]/b1[0];
d1[0] = d1[0]/b1[0];
int i;
for(i = 1; i < n; i ++) {
val_t k = b1[i]-c1[i-1]*a1[i];
c1[i] = c1[i] / k;
d1[i] = (d1[i]-d1[i-1]*a1[i]) / k;
}
for(i = n-2; i >= 0; i --) {
d1[i] = d1[i] - c1[i]*d1[i+1];
}
}
__syncthreads();
iter --;
while(iter >= 0) {
d2 = d1;
if(iter > 0) {
a1 = a1 - length_record[iter];
b1 = b1 - length_record[iter];
c1 = c1 - length_record[iter];
d1 = d1 - length_record[iter];
} else {
a1 = a;
b1 = b;
c1 = c;
d1 = d;
}
int n = length_record[iter]/2;
if(length_record[iter]%2) {
int ii = tid;
{
int i = n - 1 - ii;
d1[2*i+1] = d2[i];
d1[2*i+2] = (d1[2*i+2] - a1[2*i+2]*d2[i] - c1[2*i+2]*((tid==0)?0:d2[i+1]))/b1[2*i+2];
}
for(ii = tid+tnum; ii < n; ii += tnum) {
int i = n - 1 - ii;
d1[2*i+1] = d2[i];
d1[2*i+2] = (d1[2*i+2] - a1[2*i+2]*d2[i] - c1[2*i+2]*d2[i+1])/b1[2*i+2];
}
if(tid == 0)
d1[0] = (d1[0] - c1[0]*d2[0])/b1[0];
} else {
int ii = tid;
{
int i = n - 1 - ii;
d1[2*i] = d2[i];
d1[2*i+1] = (d1[2*i+1] - a1[2*i+1]*d2[i] - c1[2*i+1]*((tid==0)?0:d2[i+1]))/b1[2*i+1];
}
for(ii = tid+tnum; ii < n; ii += tnum) {
int i = n - 1 - ii;
d1[2*i] = d2[i];
d1[2*i+1] = (d1[2*i+1] - a1[2*i+1]*d2[i] - c1[2*i+1]*d2[i+1])/b1[2*i+1];
}
}
iter --;
__syncthreads();
}
}
#define THOMAS_THREAD_NUM 64
#define CR_THREAD_NUM 64
static void *buffer;
void Thomas_single(float *a_gbl, float *b_gbl, float *c_gbl, float *d_gbl, int n_eqt, int n_batch) {
int block_num = (n_batch-1) / THOMAS_THREAD_NUM + 1;
hipLaunchKernelGGL(( Thomas_kernel<float>), dim3(block_num), dim3(THOMAS_THREAD_NUM), 0, 0, a_gbl, b_gbl, c_gbl, d_gbl, n_eqt, n_batch);
hipDeviceSynchronize();
}
void CR_single(float *a_gbl, float *b_gbl, float *c_gbl, float *d_gbl, int n_eqt, int n_batch) {
hipLaunchKernelGGL(( CR_kernel<float>), dim3(n_batch), dim3(CR_THREAD_NUM), 0, 0, a_gbl, b_gbl, c_gbl, d_gbl, static_cast<float*>(buffer), n_eqt, n_batch);
hipDeviceSynchronize();
}
void Thomas_double(double *a_gbl, double *b_gbl, double *c_gbl, double *d_gbl, int n_eqt, int n_batch) {
int block_num = (n_batch-1) / THOMAS_THREAD_NUM + 1;
hipLaunchKernelGGL(( Thomas_kernel<double>), dim3(block_num), dim3(THOMAS_THREAD_NUM), 0, 0, a_gbl, b_gbl, c_gbl, d_gbl, n_eqt, n_batch);
hipDeviceSynchronize();
}
void CR_double(double *a_gbl, double *b_gbl, double *c_gbl, double *d_gbl, int n_eqt, int n_batch) {
hipLaunchKernelGGL(( CR_kernel<double>), dim3(n_batch), dim3(CR_THREAD_NUM), 0, 0, a_gbl, b_gbl, c_gbl, d_gbl, static_cast<double*>(buffer), n_eqt, n_batch);
hipDeviceSynchronize();
}
int CR_init_single(float *a_gbl, float *b_gbl, float *c_gbl, float *d_gbl, int n_eqt, int n_batch) {
return hipMalloc(&buffer, 4 * n_eqt * n_batch * sizeof(float));
}
int CR_init_double(double *a_gbl, double *b_gbl, double *c_gbl, double *d_gbl, int n_eqt, int n_batch) {
return hipMalloc(&buffer, 4 * n_eqt * n_batch * sizeof(double));
}
void CR_final() {
hipFree(buffer);
}
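// cuSPARSE reference path: gtsv2StridedBatch solves n_batch tridiagonal systems
// of size n_eqt in place (a/b/c = lower/main/upper diagonals, d = RHS) with a
// batch stride of n_eqt; the workspace size comes from the bufferSizeExt query.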
static hipsparseHandle_t cusparse_handle;
int cuSparse_init_single(float *a_gbl, float *b_gbl, float *c_gbl, float *d_gbl, int n_eqt, int n_batch) {
size_t size;
int ret = hipsparseCreate(&cusparse_handle);
if(ret != HIPSPARSE_STATUS_SUCCESS) {
return ret;
}
hipsparseSgtsv2StridedBatch_bufferSizeExt(cusparse_handle, n_eqt, a_gbl, b_gbl, c_gbl, d_gbl, n_batch, n_eqt, &size);
ret = hipMalloc(&buffer, size);
if(ret != hipSuccess) {
hipsparseDestroy(cusparse_handle);
}
return ret;
}
int cuSparse_init_double(double *a_gbl, double *b_gbl, double *c_gbl, double *d_gbl, int n_eqt, int n_batch) {
size_t size;
int ret = hipsparseCreate(&cusparse_handle);
if(ret != HIPSPARSE_STATUS_SUCCESS) {
return ret;
}
hipsparseDgtsv2StridedBatch_bufferSizeExt(cusparse_handle, n_eqt, a_gbl, b_gbl, c_gbl, d_gbl, n_batch, n_eqt, &size);
ret = hipMalloc(&buffer, size);
if(ret != hipSuccess) {
hipsparseDestroy(cusparse_handle);
}
return ret;
}
void cuSparse_final() {
hipFree(buffer);
hipsparseDestroy(cusparse_handle);
}
void cuSparse_single(float *a_gbl, float *b_gbl, float *c_gbl, float *d_gbl, int n_eqt, int n_batch) {
hipsparseSgtsv2StridedBatch(cusparse_handle, n_eqt, a_gbl, b_gbl, c_gbl, d_gbl, n_batch, n_eqt, buffer);
hipDeviceSynchronize();
}
void cuSparse_double(double *a_gbl, double *b_gbl, double *c_gbl, double *d_gbl, int n_eqt, int n_batch) {
hipsparseDgtsv2StridedBatch(cusparse_handle, n_eqt, a_gbl, b_gbl, c_gbl, d_gbl, n_batch, n_eqt, buffer);
hipDeviceSynchronize();
} | 038190a01960d90ce9dd990af10d9bd5f429165e.cu | #include <cusparse.h>
#include "tridiagonal.h"
template<typename val_t>
__global__ void Thomas_kernel(val_t *a_gbl, val_t *b_gbl, val_t *c_gbl, val_t *d_gbl, int n_eqt, int n_batch) {
int task_id = blockIdx.x * blockDim.x + threadIdx.x;
if(task_id < n_batch){
int i;
val_t *a = a_gbl + task_id*n_eqt;
val_t *b = b_gbl + task_id*n_eqt;
val_t *c = c_gbl + task_id*n_eqt;
val_t *r = d_gbl + task_id*n_eqt;
val_t cc = c[0]/b[0];
val_t rr = r[0]/b[0];
c[0] = cc;
r[0] = rr;
for(i = 1; i < n_eqt; i ++) {
val_t k = (b[i] - cc*a[i]);
cc = c[i] / k;
rr = (r[i] - rr*a[i]) / k;
c[i] = cc;
r[i] = rr;
}
for(i = n_eqt-2; i >= 0; i --) {
rr = r[i] - c[i] * rr;
r[i] = rr;
}
}
}
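// Cyclic reduction: one block per system (n_batch blocks). Each forward sweep
// combines neighbouring rows to halve the system, writing the reduced system
// into `buffer` (4 * n_eqt * n_batch scratch values). Once fewer than
// 2 * blockDim.x equations remain, thread 0 solves the rest with Thomas, and
// the backward sweeps substitute the results back into the larger systems.
// length_record keeps the system size for each of up to 32 reduction levels.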
template<typename val_t>
__global__ void CR_kernel(val_t *a_gbl, val_t *b_gbl, val_t *c_gbl, val_t *d_gbl, val_t *buffer, int n_eqt, int n_batch) {
const int tid = threadIdx.x;
const int tnum = blockDim.x;
const int bid = blockIdx.x;
val_t *a = a_gbl + bid*n_eqt;
val_t *b = b_gbl + bid*n_eqt;
val_t *c = c_gbl + bid*n_eqt;
val_t *d = d_gbl + bid*n_eqt;
val_t *a1 = a;
val_t *b1 = b;
val_t *c1 = c;
val_t *d1 = d;
val_t *a2 = buffer + 0*n_eqt*n_batch + bid*n_eqt;
val_t *b2 = buffer + 1*n_eqt*n_batch + bid*n_eqt;
val_t *c2 = buffer + 2*n_eqt*n_batch + bid*n_eqt;
val_t *d2 = buffer + 3*n_eqt*n_batch + bid*n_eqt;
__shared__ int length_record[32];
int iter = 0;
if(tid==0) length_record[0] = n_eqt;
__syncthreads();
//CR Forward
while(length_record[iter] >= 2*tnum) {
int n = length_record[iter]/2;
if(length_record[iter]%2)
{
int i = tid;
for(i = tid; i < n; i += tnum) {
val_t k1 = a1[2*i+1]/b1[2*i-0];
val_t k2 = c1[2*i+1]/b1[2*i+2];
a2[i] = -a1[2*i-0] * k1;
b2[i] = b1[2*i+1] - c1[2*i-0]*k1 - a1[2*i+2]*k2;
c2[i] = -c1[2*i+2] * k2;
d2[i] = d1[2*i+1] - d1[2*i-0]*k1 - d1[2*i+2]*k2;
}
} else {
int i = tid;
{
val_t k1 = a1[2*i]/((tid==0)?1:b1[2*i-1]);
val_t k2 = c1[2*i]/b1[2*i+1];
a2[i] = -((tid==0)?0:a1[2*i-1]) * k1;
b2[i] = b1[2*i] - ((tid==0)?0:c1[2*i-1])*k1 - a1[2*i+1]*k2;
c2[i] = -c1[2*i+1] * k2;
d2[i] = d1[2*i] - ((tid==0)?0:d1[2*i-1])*k1 - d1[2*i+1]*k2;
}
for(i = tid+tnum; i < n; i += tnum) {
val_t k1 = a1[2*i]/b1[2*i-1];
val_t k2 = c1[2*i]/b1[2*i+1];
a2[i] = -a1[2*i-1] * k1;
b2[i] = b1[2*i] - c1[2*i-1]*k1 - a1[2*i+1]*k2;
c2[i] = -c1[2*i+1] * k2;
d2[i] = d1[2*i] - d1[2*i-1]*k1 - d1[2*i+1]*k2;
}
}
a1 = a2;
b1 = b2;
c1 = c2;
d1 = d2;
a2 = a2 + n;
b2 = b2 + n;
c2 = c2 + n;
d2 = d2 + n;
++iter;
if(tid == 0)length_record[iter] = n;
__syncthreads();
}
//Thomas
if(tid == 0) {
int n = length_record[iter];
c1[0] = c1[0]/b1[0];
d1[0] = d1[0]/b1[0];
int i;
for(i = 1; i < n; i ++) {
val_t k = b1[i]-c1[i-1]*a1[i];
c1[i] = c1[i] / k;
d1[i] = (d1[i]-d1[i-1]*a1[i]) / k;
}
for(i = n-2; i >= 0; i --) {
d1[i] = d1[i] - c1[i]*d1[i+1];
}
}
__syncthreads();
iter --;
while(iter >= 0) {
d2 = d1;
if(iter > 0) {
a1 = a1 - length_record[iter];
b1 = b1 - length_record[iter];
c1 = c1 - length_record[iter];
d1 = d1 - length_record[iter];
} else {
a1 = a;
b1 = b;
c1 = c;
d1 = d;
}
int n = length_record[iter]/2;
if(length_record[iter]%2) {
int ii = tid;
{
int i = n - 1 - ii;
d1[2*i+1] = d2[i];
d1[2*i+2] = (d1[2*i+2] - a1[2*i+2]*d2[i] - c1[2*i+2]*((tid==0)?0:d2[i+1]))/b1[2*i+2];
}
for(ii = tid+tnum; ii < n; ii += tnum) {
int i = n - 1 - ii;
d1[2*i+1] = d2[i];
d1[2*i+2] = (d1[2*i+2] - a1[2*i+2]*d2[i] - c1[2*i+2]*d2[i+1])/b1[2*i+2];
}
if(tid == 0)
d1[0] = (d1[0] - c1[0]*d2[0])/b1[0];
} else {
int ii = tid;
{
int i = n - 1 - ii;
d1[2*i] = d2[i];
d1[2*i+1] = (d1[2*i+1] - a1[2*i+1]*d2[i] - c1[2*i+1]*((tid==0)?0:d2[i+1]))/b1[2*i+1];
}
for(ii = tid+tnum; ii < n; ii += tnum) {
int i = n - 1 - ii;
d1[2*i] = d2[i];
d1[2*i+1] = (d1[2*i+1] - a1[2*i+1]*d2[i] - c1[2*i+1]*d2[i+1])/b1[2*i+1];
}
}
iter --;
__syncthreads();
}
}
#define THOMAS_THREAD_NUM 64
#define CR_THREAD_NUM 64
static void *buffer;
void Thomas_single(float *a_gbl, float *b_gbl, float *c_gbl, float *d_gbl, int n_eqt, int n_batch) {
int block_num = (n_batch-1) / THOMAS_THREAD_NUM + 1;
Thomas_kernel<float><<<block_num, THOMAS_THREAD_NUM>>>(a_gbl, b_gbl, c_gbl, d_gbl, n_eqt, n_batch);
cudaDeviceSynchronize();
}
void CR_single(float *a_gbl, float *b_gbl, float *c_gbl, float *d_gbl, int n_eqt, int n_batch) {
CR_kernel<float><<<n_batch, CR_THREAD_NUM>>>(a_gbl, b_gbl, c_gbl, d_gbl, static_cast<float*>(buffer), n_eqt, n_batch);
cudaDeviceSynchronize();
}
void Thomas_double(double *a_gbl, double *b_gbl, double *c_gbl, double *d_gbl, int n_eqt, int n_batch) {
int block_num = (n_batch-1) / THOMAS_THREAD_NUM + 1;
Thomas_kernel<double><<<block_num, THOMAS_THREAD_NUM>>>(a_gbl, b_gbl, c_gbl, d_gbl, n_eqt, n_batch);
cudaDeviceSynchronize();
}
void CR_double(double *a_gbl, double *b_gbl, double *c_gbl, double *d_gbl, int n_eqt, int n_batch) {
CR_kernel<double><<<n_batch, CR_THREAD_NUM>>>(a_gbl, b_gbl, c_gbl, d_gbl, static_cast<double*>(buffer), n_eqt, n_batch);
cudaDeviceSynchronize();
}
int CR_init_single(float *a_gbl, float *b_gbl, float *c_gbl, float *d_gbl, int n_eqt, int n_batch) {
return cudaMalloc(&buffer, 4 * n_eqt * n_batch * sizeof(float));
}
int CR_init_double(double *a_gbl, double *b_gbl, double *c_gbl, double *d_gbl, int n_eqt, int n_batch) {
return cudaMalloc(&buffer, 4 * n_eqt * n_batch * sizeof(double));
}
void CR_final() {
cudaFree(buffer);
}
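// cuSPARSE reference path: gtsv2StridedBatch solves n_batch tridiagonal systems
// of size n_eqt in place (a/b/c = lower/main/upper diagonals, d = RHS) with a
// batch stride of n_eqt; the workspace size comes from the bufferSizeExt query.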
static cusparseHandle_t cusparse_handle;
int cuSparse_init_single(float *a_gbl, float *b_gbl, float *c_gbl, float *d_gbl, int n_eqt, int n_batch) {
size_t size;
int ret = cusparseCreate(&cusparse_handle);
if(ret != CUSPARSE_STATUS_SUCCESS) {
return ret;
}
cusparseSgtsv2StridedBatch_bufferSizeExt(cusparse_handle, n_eqt, a_gbl, b_gbl, c_gbl, d_gbl, n_batch, n_eqt, &size);
ret = cudaMalloc(&buffer, size);
if(ret != cudaSuccess) {
cusparseDestroy(cusparse_handle);
}
return ret;
}
int cuSparse_init_double(double *a_gbl, double *b_gbl, double *c_gbl, double *d_gbl, int n_eqt, int n_batch) {
size_t size;
int ret = cusparseCreate(&cusparse_handle);
if(ret != CUSPARSE_STATUS_SUCCESS) {
return ret;
}
cusparseDgtsv2StridedBatch_bufferSizeExt(cusparse_handle, n_eqt, a_gbl, b_gbl, c_gbl, d_gbl, n_batch, n_eqt, &size);
ret = cudaMalloc(&buffer, size);
if(ret != cudaSuccess) {
cusparseDestroy(cusparse_handle);
}
return ret;
}
void cuSparse_final() {
cudaFree(buffer);
cusparseDestroy(cusparse_handle);
}
void cuSparse_single(float *a_gbl, float *b_gbl, float *c_gbl, float *d_gbl, int n_eqt, int n_batch) {
cusparseSgtsv2StridedBatch(cusparse_handle, n_eqt, a_gbl, b_gbl, c_gbl, d_gbl, n_batch, n_eqt, buffer);
cudaDeviceSynchronize();
}
void cuSparse_double(double *a_gbl, double *b_gbl, double *c_gbl, double *d_gbl, int n_eqt, int n_batch) {
cusparseDgtsv2StridedBatch(cusparse_handle, n_eqt, a_gbl, b_gbl, c_gbl, d_gbl, n_batch, n_eqt, buffer);
cudaDeviceSynchronize();
} |
d28d1a05e5904d84550ff5353eeace6d165f55d6.hip | // !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include <stdbool.h>
using namespace cv;
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(hipError_t code, const char *file, int line, bool abort=true)
{
if (code != hipSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", hipGetErrorString(code), file, line);
if (abort) exit(code);
}
}
typedef struct
{
float x,y,w,h,s;
}box;
__device__
float IOUcalc(box b1, box b2)
{
float ai = (float)(b1.w + 1)*(b1.h + 1);
float aj = (float)(b2.w + 1)*(b2.h + 1);
float x_inter, x2_inter, y_inter, y2_inter;
x_inter = max(b1.x,b2.x);
y_inter = max(b1.y,b2.y);
x2_inter = min((b1.x + b1.w),(b2.x + b2.w));
y2_inter = min((b1.y + b1.h),(b2.y + b2.h));
float w = (float)max((float)0, x2_inter - x_inter);
float h = (float)max((float)0, y2_inter - y_inter);
float inter = ((w*h)/(ai + aj - w*h));
return inter;
}
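// Pairwise NMS: thread (abs_x, abs_y) compares box abs_x against box abs_y;
// if abs_y has the higher score and their IoU exceeds theta, abs_x is marked
// suppressed (d_res[abs_x] = false). Surviving boxes keep d_res == true.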
__global__
void NMS_GPU(box *d_b, bool *d_res)
{
int abs_y = (blockIdx.y * blockDim.y) + threadIdx.y;
int abs_x = (blockIdx.x * blockDim.x) +threadIdx.x;
float theta = 0.6;
if(d_b[abs_x].s < d_b[abs_y].s)
{
if(IOUcalc(d_b[abs_y],d_b[abs_x])>theta)
{
d_res[abs_x] = false;
}
}
}
int main()
{
int count =3;
Mat temp = imread("/home/jeetkanjani7/pedestrian_imgs/crop001025b.png",1);
bool *h_res =(bool *)malloc(sizeof(bool)*count);
for(int i=0; i<count; i++)
{
h_res[i] = true;
}
box b[3];
b[1].x = 16; b[1].y = 12; b[1].w = 64; b[1].h = 128; b[1].s = 0.79062;
b[2].x = 12; b[2].y = 14; b[2].w = 70; b[2].h = 141; b[2].s = 0.60434 ;
b[0].x = 11; b[0].y = 6; b[0].w = 74; b[0].h = 148; b[0].s = 0.11855;
box *d_b;
bool *d_res;
gpuErrchk(hipMalloc((void**)&d_res, count*sizeof(bool)));
gpuErrchk(hipMemcpy(d_res, h_res,sizeof(bool)*count, hipMemcpyHostToDevice));
gpuErrchk(hipMalloc((void**)&d_b,sizeof(box)*count));
gpuErrchk(hipMemcpy(d_b, b,sizeof(box)*count, hipMemcpyHostToDevice));
hipLaunchKernelGGL(( NMS_GPU), dim3(dim3(1,count,1)),dim3(count), 0, 0, d_b,d_res);
hipDeviceSynchronize();
gpuErrchk(hipMemcpy(h_res, d_res, sizeof(bool)*count, hipMemcpyDeviceToHost));
for(int i =0; i<3 ; i++)
{
if(*(h_res+i) == true)
{
printf("Results= %d--%d ",i,*(h_res+i));
rectangle(temp,Point(b[i].x,b[i].y),Point(b[i].x + b[i].w,b[i].y + b[i].h),Scalar(100,100,100),2,8,0);
}
}
imshow("hello",temp);
waitKey(0);
return 0;
}
| d28d1a05e5904d84550ff5353eeace6d165f55d6.cu | #include <stdio.h>
#include <cuda_runtime.h>
#include <iostream>
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include <stdbool.h>
using namespace cv;
using namespace std;
#define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__); }
inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort=true)
{
if (code != cudaSuccess)
{
fprintf(stderr,"GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line);
if (abort) exit(code);
}
}
typedef struct
{
float x,y,w,h,s;
}box;
__device__
float IOUcalc(box b1, box b2)
{
float ai = (float)(b1.w + 1)*(b1.h + 1);
float aj = (float)(b2.w + 1)*(b2.h + 1);
float x_inter, x2_inter, y_inter, y2_inter;
x_inter = max(b1.x,b2.x);
y_inter = max(b1.y,b2.y);
x2_inter = min((b1.x + b1.w),(b2.x + b2.w));
y2_inter = min((b1.y + b1.h),(b2.y + b2.h));
float w = (float)max((float)0, x2_inter - x_inter);
float h = (float)max((float)0, y2_inter - y_inter);
float inter = ((w*h)/(ai + aj - w*h));
return inter;
}
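// Pairwise NMS: thread (abs_x, abs_y) compares box abs_x against box abs_y;
// if abs_y has the higher score and their IoU exceeds theta, abs_x is marked
// suppressed (d_res[abs_x] = false). Surviving boxes keep d_res == true.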
__global__
void NMS_GPU(box *d_b, bool *d_res)
{
int abs_y = (blockIdx.y * blockDim.y) + threadIdx.y;
int abs_x = (blockIdx.x * blockDim.x) +threadIdx.x;
float theta = 0.6;
if(d_b[abs_x].s < d_b[abs_y].s)
{
if(IOUcalc(d_b[abs_y],d_b[abs_x])>theta)
{
d_res[abs_x] = false;
}
}
}
int main()
{
int count =3;
Mat temp = imread("/home/jeetkanjani7/pedestrian_imgs/crop001025b.png",1);
bool *h_res =(bool *)malloc(sizeof(bool)*count);
for(int i=0; i<count; i++)
{
h_res[i] = true;
}
box b[3];
b[1].x = 16; b[1].y = 12; b[1].w = 64; b[1].h = 128; b[1].s = 0.79062;
b[2].x = 12; b[2].y = 14; b[2].w = 70; b[2].h = 141; b[2].s = 0.60434 ;
b[0].x = 11; b[0].y = 6; b[0].w = 74; b[0].h = 148; b[0].s = 0.11855;
box *d_b;
bool *d_res;
gpuErrchk(cudaMalloc((void**)&d_res, count*sizeof(bool)));
gpuErrchk(cudaMemcpy(d_res, h_res,sizeof(bool)*count, cudaMemcpyHostToDevice));
gpuErrchk(cudaMalloc((void**)&d_b,sizeof(box)*count));
gpuErrchk(cudaMemcpy(d_b, b,sizeof(box)*count, cudaMemcpyHostToDevice));
NMS_GPU<<<dim3(1,count,1),count>>>(d_b,d_res);
cudaThreadSynchronize();
gpuErrchk(cudaMemcpy(h_res, d_res, sizeof(bool)*count, cudaMemcpyDeviceToHost));
for(int i =0; i<3 ; i++)
{
if(*(h_res+i) == true)
{
printf("Results= %d--%d ",i,*(h_res+i));
rectangle(temp,Point(b[i].x,b[i].y),Point(b[i].x + b[i].w,b[i].y + b[i].h),Scalar(100,100,100),2,8,0);
}
}
imshow("hello",temp);
waitKey(0);
return 0;
}
|
27465855b1af763579d9380113512a6b201b8fe5.hip | // !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
#define N ( 1024 * 1024 )
#define RADIUS 5
#define THREADS_PER_BLOCK 64
/* stencil kernel */
__global__ void stencil_1d(int n, double *in, double *out)
{
/* allocate shared memory */
__shared__ double temp[THREADS_PER_BLOCK + 2*(RADIUS)];
/* calculate global index in the array */
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
int localIndex = threadIdx.x + RADIUS;
/* return if my global index is larger than the array size */
if( globalIndex >= n ) return;
/* read input elements into shared memory */
temp[localIndex] = in[globalIndex];
/* code to handle the halos. need to make sure we don't walk off the end
of the array */
if( threadIdx.x < RADIUS && globalIndex >= RADIUS )
{
temp[localIndex - RADIUS] = in[globalIndex - RADIUS];
} /* end if */
if( threadIdx.x < RADIUS && (globalIndex + THREADS_PER_BLOCK) < n )
{
temp[localIndex + THREADS_PER_BLOCK] = in[globalIndex + THREADS_PER_BLOCK];
} /* end if */
__syncthreads();
/* code to handle the boundary conditions */
if( globalIndex < RADIUS || globalIndex >= (n - RADIUS) )
{
out[globalIndex] = (double) globalIndex * ( (double)RADIUS*2 + 1) ;
return;
} /* end if */
double result = 0.0;
for( int i = -(RADIUS); i <= (RADIUS); i++ )
{
result += temp[localIndex + i];
} /* end for */
out[globalIndex] = result;
return;
}
int main()
{
double *in, *out;
double *d_in, *d_out;
int size = N * sizeof( double );
/* get GPU device number and name */
int dev;
hipDeviceProp_t deviceProp;
checkCUDA( hipGetDevice( &dev ) );
checkCUDA( hipGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* allocate space for device copies of in, out */
checkCUDA( hipMalloc( (void **) &d_in, size ) );
checkCUDA( hipMalloc( (void **) &d_out, size ) );
/* allocate space for host copies of in, out and setup input values */
in = (double *)malloc( size );
out = (double *)malloc( size );
for( int i = 0; i < N; i++ )
{
in[i] = (double) i;
out[i] = -99.0;
}
/* copy inputs to device */
checkCUDA( hipMemcpy( d_in, in, size, hipMemcpyHostToDevice ) );
checkCUDA( hipMemset( d_out, 0, size ) );
/* calculate block and grid sizes */
dim3 threads( THREADS_PER_BLOCK, 1, 1);
dim3 blocks( (N / threads.x) + 1, 1, 1);
/* start the timers */
hipEvent_t start, stop;
checkCUDA( hipEventCreate( &start ) );
checkCUDA( hipEventCreate( &stop ) );
checkCUDA( hipEventRecord( start, 0 ) );
/* launch the kernel on the GPU */
hipLaunchKernelGGL(( stencil_1d), dim3(blocks), dim3(threads) , 0, 0, N, d_in, d_out );
checkKERNEL()
/* stop the timers */
checkCUDA( hipEventRecord( stop, 0 ) );
checkCUDA( hipEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( hipEventElapsedTime( &elapsedTime, start, stop ) );
printf("Total time for %d elements was %f ms\n", N, elapsedTime );
/* copy result back to host */
checkCUDA( hipMemcpy( out, d_out, size, hipMemcpyDeviceToHost ) );
int success = 1;
for( int i = 0; i < N; i++ )
{
if( in[i]*( (double)RADIUS*2+1 ) != out[i] )
{
printf("error in element %d in = %f out %f\n",i,in[i],out[i] );
success = 0;
break;
} /* end if */
} /* end for */
if( success == 1 ) printf("PASS\n");
else printf("FAIL\n");
/* clean up */
free(in);
free(out);
checkCUDA( hipFree( d_in ) );
checkCUDA( hipFree( d_out ) );
checkCUDA( hipDeviceSynchronize() );
return 0;
} /* end main */
| 27465855b1af763579d9380113512a6b201b8fe5.cu | /*
* Copyright 2015 NVIDIA Corporation
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include <stdio.h>
#include "../debug.h"
#define N ( 1024 * 1024 )
#define RADIUS 5
#define THREADS_PER_BLOCK 64
/* stencil kernel */
__global__ void stencil_1d(int n, double *in, double *out)
{
/* allocate shared memory */
__shared__ double temp[THREADS_PER_BLOCK + 2*(RADIUS)];
/* calculate global index in the array */
int globalIndex = blockIdx.x * blockDim.x + threadIdx.x;
int localIndex = threadIdx.x + RADIUS;
/* return if my global index is larger than the array size */
if( globalIndex >= n ) return;
/* read input elements into shared memory */
temp[localIndex] = in[globalIndex];
/* code to handle the halos. need to make sure we don't walk off the end
of the array */
if( threadIdx.x < RADIUS && globalIndex >= RADIUS )
{
temp[localIndex - RADIUS] = in[globalIndex - RADIUS];
} /* end if */
if( threadIdx.x < RADIUS && (globalIndex + THREADS_PER_BLOCK) < n )
{
temp[localIndex + THREADS_PER_BLOCK] = in[globalIndex + THREADS_PER_BLOCK];
} /* end if */
__syncthreads();
/* code to handle the boundary conditions */
if( globalIndex < RADIUS || globalIndex >= (n - RADIUS) )
{
out[globalIndex] = (double) globalIndex * ( (double)RADIUS*2 + 1) ;
return;
} /* end if */
double result = 0.0;
for( int i = -(RADIUS); i <= (RADIUS); i++ )
{
result += temp[localIndex + i];
} /* end for */
out[globalIndex] = result;
return;
}
int main()
{
double *in, *out;
double *d_in, *d_out;
int size = N * sizeof( double );
/* get GPU device number and name */
int dev;
cudaDeviceProp deviceProp;
checkCUDA( cudaGetDevice( &dev ) );
checkCUDA( cudaGetDeviceProperties( &deviceProp, dev ) );
printf("Using GPU %d: %s\n", dev, deviceProp.name );
/* allocate space for device copies of in, out */
checkCUDA( cudaMalloc( (void **) &d_in, size ) );
checkCUDA( cudaMalloc( (void **) &d_out, size ) );
/* allocate space for host copies of in, out and setup input values */
in = (double *)malloc( size );
out = (double *)malloc( size );
for( int i = 0; i < N; i++ )
{
in[i] = (double) i;
out[i] = -99.0;
}
/* copy inputs to device */
checkCUDA( cudaMemcpy( d_in, in, size, cudaMemcpyHostToDevice ) );
checkCUDA( cudaMemset( d_out, 0, size ) );
/* calculate block and grid sizes */
dim3 threads( THREADS_PER_BLOCK, 1, 1);
dim3 blocks( (N / threads.x) + 1, 1, 1);
/* start the timers */
cudaEvent_t start, stop;
checkCUDA( cudaEventCreate( &start ) );
checkCUDA( cudaEventCreate( &stop ) );
checkCUDA( cudaEventRecord( start, 0 ) );
/* launch the kernel on the GPU */
stencil_1d<<< blocks, threads >>>( N, d_in, d_out );
checkKERNEL()
/* stop the timers */
checkCUDA( cudaEventRecord( stop, 0 ) );
checkCUDA( cudaEventSynchronize( stop ) );
float elapsedTime;
checkCUDA( cudaEventElapsedTime( &elapsedTime, start, stop ) );
printf("Total time for %d elements was %f ms\n", N, elapsedTime );
/* copy result back to host */
checkCUDA( cudaMemcpy( out, d_out, size, cudaMemcpyDeviceToHost ) );
int success = 1;
for( int i = 0; i < N; i++ )
{
if( in[i]*( (double)RADIUS*2+1 ) != out[i] )
{
printf("error in element %d in = %f out %f\n",i,in[i],out[i] );
success = 0;
break;
} /* end if */
} /* end for */
if( success == 1 ) printf("PASS\n");
else printf("FAIL\n");
/* clean up */
free(in);
free(out);
checkCUDA( cudaFree( d_in ) );
checkCUDA( cudaFree( d_out ) );
checkCUDA( cudaDeviceSynchronize() );
return 0;
} /* end main */
|