Dataset columns (string lengths shown as min–max):
  hip_filename   string, 5–84 characters
  hip_content    string, 79–9.69M characters
  cuda_filename  string, 4–83 characters
  cuda_content   string, 19–9.69M characters
605170f66fe78072e7f5e37850fd22b1be42247e.hip
// !!! This is a file automatically generated by hipify!!! /* Define the kernels to be used for comparison. There are two types of kernels: 1) Addition of two float arrays and store into a third one. 2) Calculating the tanh() of each element of two float arrays and storing into a third one. Some of the kernels work with linear indices only. However, the arrays tested are 2D. */ #include "definitions.h" #include "hip/hip_runtime.h" #include "device_launch_parameters.h" /******************************************************/ extern "C" __global__ void addKernel(real *c, const real *a, const real *b, const size_t rows, const size_t cols, const real dt) { size_t i = threadIdx.x + blockDim.x * blockIdx.x; size_t j = threadIdx.y + blockDim.y * blockIdx.y; if (i < cols && j < rows) { size_t idx = j * cols + i; c[idx] = c[idx] + dt * (a[idx] + b[idx]); } } /******************************************************/ extern "C" __global__ void modifyKernel(real* d, const real* a, const real* b, const real* c, const real dt, const size_t rows, const size_t cols) { size_t i = threadIdx.x + blockDim.x * blockIdx.x; size_t j = threadIdx.y + blockDim.y * blockIdx.y; if (i < cols && j < rows) { size_t idx = j * cols + i; d[idx] = c[idx] * c[idx] + dt * (tanh(a[idx]) + tanh(b[idx])) / real(2.0); } } /******************************************************/ extern "C" __global__ void addKernel_part(real* c, const real* a, const real* b, const size_t rows, const size_t cols, const size_t stride, const real dt) { size_t i = threadIdx.x + blockDim.x * blockIdx.x; size_t j = threadIdx.y + blockDim.y * blockIdx.y; if (i < cols && j < rows) { size_t idx = j * stride + i; c[idx] = c[idx] + dt * (a[idx] + b[idx]); } } /******************************************************/ extern "C" __global__ void modifyKernel_part(real* d, const real* a, const real* b, const real* c, const real dt, const size_t rows, const size_t cols, const size_t stride) { size_t i = threadIdx.x + blockDim.x * blockIdx.x; size_t j = threadIdx.y + blockDim.y * blockIdx.y; if (i < cols && j < rows) { size_t idx = j * stride + i; d[idx] = c[idx] * c[idx] + dt * (tanh(a[idx]) + tanh(b[idx])) / real(2.0); } }
605170f66fe78072e7f5e37850fd22b1be42247e.cu
/* Define the kernels to be used for comparison. There are two types of kernels: 1) Addition of two float arrays and store into a third one. 2) Calculating the tanh() of each element of two float arrays and storing into a third one. Some of the kernels work with linear indices only. However, the arrays tested are 2D. */ #include "definitions.h" #include "cuda_runtime.h" #include "device_launch_parameters.h" /******************************************************/ extern "C" __global__ void addKernel(real *c, const real *a, const real *b, const size_t rows, const size_t cols, const real dt) { size_t i = threadIdx.x + blockDim.x * blockIdx.x; size_t j = threadIdx.y + blockDim.y * blockIdx.y; if (i < cols && j < rows) { size_t idx = j * cols + i; c[idx] = c[idx] + dt * (a[idx] + b[idx]); } } /******************************************************/ extern "C" __global__ void modifyKernel(real* d, const real* a, const real* b, const real* c, const real dt, const size_t rows, const size_t cols) { size_t i = threadIdx.x + blockDim.x * blockIdx.x; size_t j = threadIdx.y + blockDim.y * blockIdx.y; if (i < cols && j < rows) { size_t idx = j * cols + i; d[idx] = c[idx] * c[idx] + dt * (tanh(a[idx]) + tanh(b[idx])) / real(2.0); } } /******************************************************/ extern "C" __global__ void addKernel_part(real* c, const real* a, const real* b, const size_t rows, const size_t cols, const size_t stride, const real dt) { size_t i = threadIdx.x + blockDim.x * blockIdx.x; size_t j = threadIdx.y + blockDim.y * blockIdx.y; if (i < cols && j < rows) { size_t idx = j * stride + i; c[idx] = c[idx] + dt * (a[idx] + b[idx]); } } /******************************************************/ extern "C" __global__ void modifyKernel_part(real* d, const real* a, const real* b, const real* c, const real dt, const size_t rows, const size_t cols, const size_t stride) { size_t i = threadIdx.x + blockDim.x * blockIdx.x; size_t j = threadIdx.y + blockDim.y * blockIdx.y; if (i < cols && j < rows) { size_t idx = j * stride + i; d[idx] = c[idx] * c[idx] + dt * (tanh(a[idx]) + tanh(b[idx])) / real(2.0); } }
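The pair above only defines the comparison kernels; neither row includes the host code that launches them. The sketch below is not part of the dataset: it shows one way the CUDA version of addKernel might be driven, assuming real is a typedef for float (definitions.h is not included in the row) and that this driver is compiled and linked together with the kernel file (for example with nvcc separate compilation).

#include <cuda_runtime.h>
#include <cstddef>
#include <cstdio>
#include <vector>

typedef float real;  // assumption: definitions.h may also map real to double

// Declaration matching the kernel defined in the row above; the definition is linked in.
extern "C" __global__ void addKernel(real *c, const real *a, const real *b,
                                     const size_t rows, const size_t cols, const real dt);

int main() {
    const size_t rows = 512, cols = 1024, n = rows * cols;
    const real dt = real(0.1);
    std::vector<real> h_a(n, real(1)), h_b(n, real(2)), h_c(n, real(0));

    real *d_a, *d_b, *d_c;
    cudaMalloc(&d_a, n * sizeof(real));
    cudaMalloc(&d_b, n * sizeof(real));
    cudaMalloc(&d_c, n * sizeof(real));
    cudaMemcpy(d_a, h_a.data(), n * sizeof(real), cudaMemcpyHostToDevice);
    cudaMemcpy(d_b, h_b.data(), n * sizeof(real), cudaMemcpyHostToDevice);
    cudaMemcpy(d_c, h_c.data(), n * sizeof(real), cudaMemcpyHostToDevice);

    // 2D launch: x indexes columns, y indexes rows, matching the kernel's bounds check.
    dim3 block(16, 16);
    dim3 grid((cols + block.x - 1) / block.x, (rows + block.y - 1) / block.y);
    addKernel<<<grid, block>>>(d_c, d_a, d_b, rows, cols, dt);
    cudaDeviceSynchronize();

    cudaMemcpy(h_c.data(), d_c, n * sizeof(real), cudaMemcpyDeviceToHost);
    printf("c[0] = %f\n", (double)h_c[0]);  // expect 0 + 0.1 * (1 + 2) = 0.3

    cudaFree(d_a); cudaFree(d_b); cudaFree(d_c);
    return 0;
}

The corresponding HIP driver would need only the renames already visible in the .hip row: hipMalloc, hipMemcpy, hipDeviceSynchronize, and hipLaunchKernelGGL in place of the <<<>>> launch.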
4c24be516802870c7c3bbf12fbd66126775783e6.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include <fstream> #include <string> #include <sstream> #include <ios> #include <vector> #include <set> #include <hip/hip_runtime.h> #include <hip/hip_vector_types.h> #include <device_launch_parameters.h> #include <hiprand/hiprand.h> #include <hiprand/hiprand_kernel.h> #include <chrono> #include <float.h> #include "lodepng.h" // super easy read and write for png images #include "cutil_math.h" //#define DISPERSION // return min and max components of a vector inline __host__ __device__ float3 minf3(float3 a, float3 b){ return make_float3(a.x<b.x?a.x:b.x, a.y<b.y?a.y:b.y, a.z<b.z?a.z:b.z); } inline __host__ __device__ float3 maxf3(float3 a, float3 b){ return make_float3(a.x>b.x?a.x:b.x, a.y>b.y?a.y:b.y, a.z>b.z?a.z:b.z); } //min and max of two floats inline __device__ float minf(float a, float b){return a<b?a:b;} inline __device__ float maxf(float a, float b){return a>b?a:b;} // in case something goes wrong void print_error(std::string message){ std::cerr << "Error: " << message << std::endl; exit(1); } // variables on host int h_width; int h_height; int h_samples; int bloom_rad; __constant__ float3* textures; #include "geometry.h" #include "bvh.h" #include "read_scene.h" // variables and geometry on device __constant__ int width; __constant__ int height; __constant__ int samples; __constant__ const_text* const_textures; __constant__ check_text* check_textures; __constant__ pix_text* pix_textures; __constant__ Sphere* spheres; __constant__ Box* boxes; __constant__ Triangle* all_triangles; __constant__ uint num_triangles; __constant__ compact_BVHnode* bvh; __constant__ uint num_nodes; __constant__ uint num_spheres; __constant__ uint num_boxes; // copy scene info to device void init_scene(){ read_scene(); // read from file create_compact_BVH(); const_text* d_const; hipMalloc((void**)(&d_const), h_const.size()*sizeof(const_text)); hipMemcpy((void*)(d_const), (void*)(&(h_const[0])), h_const.size()*sizeof(const_text), hipMemcpyHostToDevice); check_text* d_check; hipMalloc((void**)(&d_check), h_check.size()*sizeof(check_text)); hipMemcpy((void*)(d_check), (void*)(&(h_check[0])), h_check.size()*sizeof(check_text), hipMemcpyHostToDevice); pix_text* d_pix; hipMalloc((void**)(&d_pix), h_pix.size()*sizeof(pix_text)); hipMemcpy((void*)(d_pix), (void*)(&(h_pix[0])), h_pix.size()*sizeof(pix_text), hipMemcpyHostToDevice); float3* d_text; hipMalloc(&d_text, h_textures.size()*sizeof(float3)); hipMemcpy(d_text, &(h_textures[0]), h_textures.size()*sizeof(float3), hipMemcpyHostToDevice); Sphere* d_spheres; hipMalloc((void**)(&d_spheres), h_spheres.size()*sizeof(Sphere)); hipMemcpy((void*)(d_spheres), (void*)(&(h_spheres[0])), h_spheres.size()*sizeof(Sphere), hipMemcpyHostToDevice); Box* d_boxes; hipMalloc((void**)(&d_boxes), h_boxes.size()*sizeof(Box)); hipMemcpy((void*)(d_boxes), (void*)(&(h_boxes[0])), h_boxes.size()*sizeof(Box), hipMemcpyHostToDevice); Triangle* d_triangles; hipMalloc((void**)(&d_triangles), triangles_ordered.size()*sizeof(Triangle)); hipMemcpy((void*)(d_triangles), (void*)(&(triangles_ordered[0])), triangles_ordered.size()*sizeof(Triangle), hipMemcpyHostToDevice); compact_BVHnode* d_bvh; hipMalloc((void**)(&d_bvh), compact_BVH.size()*sizeof(compact_BVHnode)); hipMemcpy((void*)(d_bvh), (void*)(&(compact_BVH[0])), compact_BVH.size()*sizeof(compact_BVHnode), hipMemcpyHostToDevice); hipMemcpyToSymbol(const_textures, &d_const, sizeof(const_text*)); hipMemcpyToSymbol(check_textures, &d_check, sizeof(check_text*)); 
hipMemcpyToSymbol(pix_textures, &d_pix, sizeof(pix_text*)); hipMemcpyToSymbol(textures, &d_text, sizeof(float3*)); hipMemcpyToSymbol(spheres, &d_spheres, sizeof(Sphere*)); hipMemcpyToSymbol(boxes, &d_boxes, sizeof(Box*)); hipMemcpyToSymbol(all_triangles, &d_triangles, sizeof(Triangle*)); hipMemcpyToSymbol(bvh, &d_bvh, sizeof(compact_BVHnode*)); uint* d_spheres_size; uint* d_boxes_size; uint* d_triangles_size; uint* d_bvh_size; uint ss = h_spheres.size(); uint sb = h_boxes.size(); uint st = triangles_ordered.size(); uint sh = compact_BVH.size(); hipMalloc((void**)(&d_spheres_size), sizeof(int)); hipMalloc((void**)(&d_boxes_size), sizeof(int)); hipMalloc((void**)(&d_triangles_size), sizeof(int)); hipMalloc((void**)(&d_bvh_size), sizeof(int)); hipMemcpy((void*)d_spheres_size, &ss, sizeof(int), hipMemcpyHostToDevice); hipMemcpy((void*)d_boxes_size, &sb, sizeof(int), hipMemcpyHostToDevice); hipMemcpy((void*)d_triangles_size, &st, sizeof(int), hipMemcpyHostToDevice); hipMemcpy((void*)d_bvh_size, &sh, sizeof(int), hipMemcpyDeviceToHost); hipMemcpyToSymbol(num_spheres, d_spheres_size, sizeof(int)); hipMemcpyToSymbol(num_boxes, d_boxes_size, sizeof(int)); hipMemcpyToSymbol(num_triangles, d_triangles_size, sizeof(int)); hipMemcpyToSymbol(num_nodes, d_bvh_size, sizeof(int)); uint* d_width; uint* d_height; uint* d_samples; hipMalloc((void**)(&d_width), sizeof(int)); hipMalloc((void**)(&d_height), sizeof(int)); hipMalloc((void**)(&d_samples), sizeof(int)); hipMemcpy(d_width, &h_width, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_height, &h_height, sizeof(int), hipMemcpyHostToDevice); hipMemcpy(d_samples, &h_samples, sizeof(int), hipMemcpyHostToDevice); hipMemcpyToSymbol(width, d_width, sizeof(int)); hipMemcpyToSymbol(height, d_height, sizeof(int)); hipMemcpyToSymbol(samples, d_samples, sizeof(int)); std::cout << "Initialized " << triangles_ordered.size() << " triangles" << std::endl; } // recursively intersect ray with bvh to find triangle in sublinear time // I really need to give each mesh a separate bvh but I haven't yet __device__ void intersect_triangles(const Ray& r_in, float& t, float& dist, int& id){ int stack[64]; // its reasonable to assume this will be way bigger than neccesary int stack_idx = 1; stack[0] = 0; float d; float tb = -1e20; // large negative while(stack_idx){ int boxidx = stack[stack_idx - 1]; // pop off top of stack stack_idx --; if(!(bvh[boxidx].u.leaf.count & 0x80000000)){ // inner Box b; b.min = bvh[boxidx].min; b.max = bvh[boxidx].max; if (b.intersect(r_in)){ stack[stack_idx++] = bvh[boxidx].u.inner.left; // push right and left onto stack stack[stack_idx++] = bvh[boxidx].u.inner.right; } } else{ // leaf for (int i = bvh[boxidx].u.leaf.offset; i < bvh[boxidx].u.leaf.offset + (bvh[boxidx].u.leaf.count & 0x7fffffff); i++){ // intersect all triangles in this box if ((d = all_triangles[i].intersect(r_in)) && d > -1e19){ if(d<t && d>0.001){ t=d; id = i; } else if(d>tb && d<0.001){ tb = d; } } } } } dist = t - tb; if (tb < -1e19) dist = 0; // nothing intersected } // find first thing that the ray hits __device__ bool intersect_scene(const Ray& r_in, Hit_data& dat){ int n = num_spheres; float t = 1e20; float d; int id; dat.dist = 1; dat.t = 1e20; float dist, dist_save; for(int i = int(n); i--; ){ // intersect spheres if ((d = spheres[i].intersect(r_in, dist)) && d<t && d > 0){ t = d; id = i; dist_save = dist; } } // only update dat at most once per geometry type if (t < dat.t){ dat.p = r_in.origin + t*r_in.direction; dat.t = t; dat.scat = spheres[id].scat; dat.text = 
spheres[id].text; dat.normal = normalize(dat.p - spheres[id].center); dat.texture_num = spheres[id].texture_num; dat.uv = make_float2(0.5 + atan(dat.normal.z/dat.normal.x)/2/M_PI, 0.5 - asin(dat.normal.y)/2/M_PI); dat.shift = spheres[id].radius/100000; t = dat.t; if (dist_save < 1e19) dat.dist = dist_save; else dat.dist = 0; // shouldn't happen. means nothing hit } n = num_boxes; for(int i = int(n); i--; ){ // intersect boxes if ((d = boxes[i].intersect(r_in)) && d<t && d>0){ t=d; id = i; } } if (t<dat.t){ dat.p = r_in.origin + t*r_in.direction; dat.t = t; dat.scat = boxes[id].scat; dat.text = boxes[id].text; dat.normal = boxes[id].normal_at(r_in.origin + t*r_in.direction); dat.texture_num = boxes[id].texture_num; dat.uv = boxes[id].uv_at(dat.p); dat.shift = 0; t = dat.t; } intersect_triangles(r_in, t, dist, id); if (t<dat.t){ dat.p = r_in.origin + t*r_in.direction; dat.t = t; dat.scat = all_triangles[id].scat; dat.text = all_triangles[id].text; dat.normal = all_triangles[id].normal; dat.texture_num = all_triangles[id].texture_num; dat.uv = all_triangles[id].uv_at(dat.p); dat.dist = dist; dat.shift = 0; } return dat.t<1e20; // return whether anying was hit } __device__ float3 radiance(Ray& r_in, hiprandState_t* randstate){ float3 color = make_float3(0.0, 0.0, 0.0); //accumulated total light float3 mask; //accumulated color mask float idx_refr; #ifdef DISPERSION // do each rgb separately // very subtle for most scenes and very slow if (r_in.light == RED){ mask = make_float3(1.0,0.0,0.0); idx_refr = 1.5; } else if (r_in.light == GREEN){ mask = make_float3(0.0,1.0,0.0); idx_refr = 1.52; } else{ mask = make_float3(0.0,0.0,1.0); idx_refr = 1.54; } #else mask = make_float3(1.0,1.0,1.0); idx_refr = 1.5; #endif // loop is faster than recursion and easy to do here for (int bounces = 0; bounces < 40; ++bounces){ Hit_data dat; if(!intersect_scene(r_in, dat)){ // if ray doesn't hit anything float t = (r_in.direction.y + 1)/2; return color + mask*(make_float3(1)*(1-t) + make_float3(0.5,0.7,1)*t); // gradient sky } // normal pointing in correct way float3 nl = dot(dat.normal, r_in.direction) < 0 ? dat.normal : dat.normal*-1; float3 d; float3 col; float3 emission; float dist_traveled = dat.t; // for everything except volumes float3 atten = r_in.attenuation; // color doesn't depend on point if(dat.text == CONST){ col = const_textures[dat.texture_num].color; emission = const_textures[dat.texture_num].emission; } // checkered using uv map if(dat.text == CHECK){ check_text ct = check_textures[dat.texture_num]; emission = ct.emission; float n = sinf(dat.uv.x*ct.scale.x)*sinf(dat.uv.y*ct.scale.y); if (n<0) col = ct.color1; else col = ct.color2; } // image texture using uv if(dat.text == PIX){ pix_text pt = pix_textures[dat.texture_num]; col = pt.color_at(dat.uv); } // perfectly diffuse if(dat.scat == DIFF){\ // pick random direction float theta = 2*M_PI*hiprand_uniform(randstate); float cosphi = hiprand_uniform(randstate); float sinphi = sqrtf(1-cosphi*cosphi); float3 w = nl; float3 u = normalize(cross((fabs(w.x) > 0.0001 ? 
make_float3(0,1,0) : make_float3(1,0,0)), w)); float3 v = cross(w,u); //rotate new ray to correct hemisphere d = normalize(u*cosf(theta)*sinphi + v*sinf(theta)*sinphi + w*cosphi); } // perfectly specular reflection if (dat.scat == SPEC){ d = r_in.direction - 2.0*dat.normal*dot(dat.normal,r_in.direction); } // either perfect reflection or perfect refraction depending on Schlick's approxomation if (dat.scat == REFR){ float3 reflected = r_in.direction - 2.0*dat.normal*dot(dat.normal,r_in.direction); float ratio; float3 refracted; float reflect_prob; float cosine; if(dot(r_in.direction, dat.normal) > 0){// ray is entering ratio = idx_refr; cosine = idx_refr*dot(r_in.direction, dat.normal); } else{ // ray is leaving ratio = 1.0/idx_refr; cosine = -dot(r_in.direction, dat.normal); } // Schlick's approxomation float dt = dot(r_in.direction, nl); float disc = 1.0 - ratio*ratio*(1-dt*dt); if(disc > 0){ refracted = ratio*(r_in.direction - dt*nl) - sqrtf(disc)*nl; float r0 = (1 - idx_refr) / (1 + idx_refr); r0 = r0*r0; float c = 1-cosine; reflect_prob = r0+(1-r0)*c*c*c*c*c; } else{ reflect_prob = 1.0; } if(hiprand_uniform(randstate) < reflect_prob) d = reflected; else{ d = refracted; // change media attenuation color if (dot(r_in.direction, dat.normal) < 0){ //entering r_in.attenuation.x = col.x; r_in.attenuation.y = col.y; r_in.attenuation.z = col.z; } else{//leaving r_in.attenuation.x = 1; r_in.attenuation.y = 1; r_in.attenuation.z = 1; } dat.shift = 0; } col = make_float3(1); } // metal material from Peter Shirley's Ray Tracing in one Weekend if(dat.scat == METAL){ float phi = 2*M_PI*hiprand_uniform(randstate); float r = hiprand_uniform(randstate); float exponent = 10; float cosTheta = powf(1-r, 1.0/(exponent+1)); float sinTheta = sqrtf(1-cosTheta*cosTheta); float3 w = normalize(r_in.direction - 2.0*dat.normal*dot(dat.normal,r_in.direction)); float3 u = cross((fabs(w.x) > 0.001?make_float3(0,1,0):make_float3(1,0,0)),w); float3 v = cross(w,u); d = normalize(u*cosf(phi)*sinTheta + v*sinf(phi)*sinTheta + w*cosTheta); } // like glass except diffuse instead of refract // looks like glazed ceramic if (dat.scat == COAT){ // Schlick's approxomation float c = 1 + idx_refr*dot(r_in.direction, nl); float r0 = (1 - idx_refr) / (1 + idx_refr); r0 = r0*r0; float reflect_prob = r0+(1-r0)*c*c*c*c*c; if(hiprand_uniform(randstate)<reflect_prob){ d = r_in.direction - 2.0*dat.normal*dot(dat.normal,r_in.direction); col = make_float3(1); } else{ float theta = 2*M_PI*hiprand_uniform(randstate); float cosphi = hiprand_uniform(randstate); float sinphi = sqrtf(1-cosphi*cosphi); float3 w = nl; float3 u = normalize(cross((fabs(w.x) > 0.0001 ? 
make_float3(0,1,0) : make_float3(1,0,0)), w)); float3 v = cross(w,u); d = normalize(u*cosf(theta)*sinphi + v*sinf(theta)*sinphi + w*cosphi); } } // calculate distance traveled based on particle density if (dat.scat == VOLUME){ float density = const_textures[dat.texture_num].density; float dist = -(1/density)*logf(hiprand_uniform(randstate)); if(dot(r_in.direction, dat.normal) > 0){ // ray started inside if (dist < dat.t){ // ray ends inside // reflect in random direction float theta = hiprand_uniform(randstate)*2*M_PI; float cosphi = 2*hiprand_uniform(randstate) - 1; float sinphi = sqrtf(1 - cosphi*cosphi); d = normalize(make_float3(cosf(theta)*sinphi, sinf(theta)*sinphi, cosphi)); dat.p = r_in.origin + dist*r_in.direction; // origin of new ray dist_traveled = dist; // ray didn't go all the way to intersection } else{ // continue in same direction d = r_in.direction; col = make_float3(1); } } else{ // ray started outside d = r_in.direction; col = make_float3(1); } dat.shift = 0; // dont move ray origin } // attenuation due to media float r = __expf(-1*(1-atten.x)*dist_traveled); float g = __expf(-1*(1-atten.y)*dist_traveled); float b = __expf(-1*(1-atten.z)*dist_traveled); mask = mask*make_float3(r,g,b); color += mask*emission; // if object emits light add to total mask = mask * col; // update color mask // new ray r_in.origin = dat.p + nl*dat.shift; r_in.direction = d; if (mask.x + mask.y + mask.z < 0.01) break; // if ray has lost most energy no need to continue } return color; } // call from host __global__ void render_kernel(float3 *output){ // coordinates of pixel uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; // index in output array uint i = (height - y - 1)*width + x; uint threadId = (blockIdx.x+blockIdx.y*gridDim.x)* (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x; hiprandState_t randstate; hiprand_init(threadId,0,0,&randstate); //initialize host //eventually I would like to make this interactive so this camera stuff is here float3 cam = make_float3(0,15,40); float3 look_at = make_float3(0,15,0); float3 w = normalize(cam - look_at); float3 u = cross(make_float3(0,1,0),w); float3 v = cross(w,u); float focal_length = length(cam - look_at); float aperture = 20; float lens_radius = 0.5; float he = tanf(aperture/2); float wi = he*width/height; float3 screen_corner = cam - wi*focal_length*u - he*focal_length*v - focal_length*w; float3 horiz = 2*wi*focal_length*u; float3 vert = 2*he*focal_length*v; float3 r = make_float3(0.0); float3 d; for (int s = 0; s<samples; ++s){ // new ray for each sample float theta = 2*M_PI*hiprand_uniform(&randstate); float rad = lens_radius*hiprand_uniform(&randstate); float3 from = cam + rad*(u*cosf(theta) + v*sinf(theta)); // slight random for depth of field float xs = hiprand_uniform(&randstate); float ys = hiprand_uniform(&randstate); d = normalize(screen_corner + horiz*(float(x+xs)/float(width)) + vert*(float(y+ys)/float(height)) - from); light_type l = ((s%3==0)?RED:((s%3==1)?GREEN:BLUE)); // only used for dispersion Ray init_ray = Ray(from, d, make_float3(1,1,1), l); r = r + radiance(init_ray,&randstate); } r = r/samples; #ifdef DISPERSION r = r*3; #endif output[i] = r; } // utilities to turn light vals into image inline float clamp(float x){return x<0.0? 0.0: x>1.0 ? 
1.0 : x;} inline int to_int(float x){return int(pow(clamp(x), 1/2.2)*255 + 0.5);} // apply gausian blur to very bright pixels to get a bloom effect void bloom(float3* in, float3* out, int radius, float stddev){ float kernel[(2*radius+1)*(2*radius+1)]; float denom = 2*stddev*stddev; for (int y = -radius; y<=radius; y++){ for (int x = -radius; x<=radius; x++){ kernel[(y+radius)*(2*radius+1)+x+radius] = exp(-(x*x+y*y)/denom)/denom/M_PI; } } float3* temp = new float3[h_height*h_width]; for (int i = 0; i<h_width*h_height; i++){ float r,g,b; r=g=b=0; if (in[i].x > 1) r = in[i].x - 1; if (in[i].y > 1) g = in[i].y - 1; if (in[i].z > 1) b = in[i].z - 1; temp[i] = make_float3(r,g,b); } for (int y = 0; y<h_height; y++){ for(int x = 0; x<h_width; x++){ float3 c = make_float3(0); for (int ky = -radius; ky<=radius; ky++){ for (int kx = -radius; kx<=radius; kx++){ if ((y+ky)>=0 && (y+ky)<h_height && (x+kx)>=0 && (x+kx)<h_width){ c = c+temp[(y+ky)*h_width+(x+kx)]*kernel[(ky+radius)*(2*radius+1)+kx+radius]; } } } out[y*h_width+x] = c/20 + in[y*h_width + x]; } } delete[] temp; } int main(){ // time the various parts of computation std::chrono::time_point<std::chrono::system_clock> t0,t1,t2,t3; t0 = std::chrono::system_clock::now(); init_scene(); float3* output_h = new float3[h_width*h_height]; float3* output_d; hipMalloc(&output_d, h_width*h_height*sizeof(float3)); dim3 block(8,8,1); dim3 grid(h_width/block.x, h_height/block.y, 1); std::cout << "CUDA initialized" << std::endl; std::cout << "Start rendering..." << std::endl; t1 = std::chrono::system_clock::now(); hipLaunchKernelGGL(( render_kernel), dim3(grid), dim3(block), 0, 0, output_d); hipDeviceSynchronize(); hipMemcpy(output_h, output_d, h_width*h_height*sizeof(float3), hipMemcpyDeviceToHost); hipFree(output_d); t2 = std::chrono::system_clock::now(); std::cout << "Done" << std::endl; float3* out = new float3[h_width*h_height]; bloom(output_h, out, bloom_rad, 3); std::vector<unsigned char> image(h_width*h_height*4); for(int y = 0; y<h_height; y++){ for(int x = 0; x<h_width; x++){ int idx = (y*h_width + x); image[4*idx + 0] = to_int(out[idx].x); image[4*idx + 1] = to_int(out[idx].y); image[4*idx + 2] = to_int(out[idx].z); image[4*idx + 3] = 255; } } lodepng::encode("test.png", image, h_width, h_height); // save image std::cout << "Saved image" << std::endl; delete[] output_h; t3 = std::chrono::system_clock::now(); std::chrono::duration<double> init_time = t1 - t0; std::chrono::duration<double> kernel_time = t2 - t1; std::chrono::duration<double> post_time = t3 - t2; double is = init_time.count(); double ks = kernel_time.count(); double ps = post_time.count(); int im = is/60; int km = ks/60; int pm = ps/60; is -= 60*im; ks -= 60*km; ps -= 60*pm; std::cout << std::endl; std::cout << "Initialization time: " << im << "m" << is << "s" << std::endl; std::cout << "Render time: " << km << "m" << ks << "s" << std::endl; std::cout << "Post process time: " << pm << "m" << ps << "s" << std::endl; return 0; }
4c24be516802870c7c3bbf12fbd66126775783e6.cu
#include <iostream> #include <fstream> #include <string> #include <sstream> #include <ios> #include <vector> #include <set> #include <cuda_runtime.h> #include <vector_types.h> #include <device_launch_parameters.h> #include <curand.h> #include <curand_kernel.h> #include <chrono> #include <float.h> #include "lodepng.h" // super easy read and write for png images #include "cutil_math.h" //#define DISPERSION // return min and max components of a vector inline __host__ __device__ float3 minf3(float3 a, float3 b){ return make_float3(a.x<b.x?a.x:b.x, a.y<b.y?a.y:b.y, a.z<b.z?a.z:b.z); } inline __host__ __device__ float3 maxf3(float3 a, float3 b){ return make_float3(a.x>b.x?a.x:b.x, a.y>b.y?a.y:b.y, a.z>b.z?a.z:b.z); } //min and max of two floats inline __device__ float minf(float a, float b){return a<b?a:b;} inline __device__ float maxf(float a, float b){return a>b?a:b;} // in case something goes wrong void print_error(std::string message){ std::cerr << "Error: " << message << std::endl; exit(1); } // variables on host int h_width; int h_height; int h_samples; int bloom_rad; __constant__ float3* textures; #include "geometry.h" #include "bvh.h" #include "read_scene.h" // variables and geometry on device __constant__ int width; __constant__ int height; __constant__ int samples; __constant__ const_text* const_textures; __constant__ check_text* check_textures; __constant__ pix_text* pix_textures; __constant__ Sphere* spheres; __constant__ Box* boxes; __constant__ Triangle* all_triangles; __constant__ uint num_triangles; __constant__ compact_BVHnode* bvh; __constant__ uint num_nodes; __constant__ uint num_spheres; __constant__ uint num_boxes; // copy scene info to device void init_scene(){ read_scene(); // read from file create_compact_BVH(); const_text* d_const; cudaMalloc((void**)(&d_const), h_const.size()*sizeof(const_text)); cudaMemcpy((void*)(d_const), (void*)(&(h_const[0])), h_const.size()*sizeof(const_text), cudaMemcpyHostToDevice); check_text* d_check; cudaMalloc((void**)(&d_check), h_check.size()*sizeof(check_text)); cudaMemcpy((void*)(d_check), (void*)(&(h_check[0])), h_check.size()*sizeof(check_text), cudaMemcpyHostToDevice); pix_text* d_pix; cudaMalloc((void**)(&d_pix), h_pix.size()*sizeof(pix_text)); cudaMemcpy((void*)(d_pix), (void*)(&(h_pix[0])), h_pix.size()*sizeof(pix_text), cudaMemcpyHostToDevice); float3* d_text; cudaMalloc(&d_text, h_textures.size()*sizeof(float3)); cudaMemcpy(d_text, &(h_textures[0]), h_textures.size()*sizeof(float3), cudaMemcpyHostToDevice); Sphere* d_spheres; cudaMalloc((void**)(&d_spheres), h_spheres.size()*sizeof(Sphere)); cudaMemcpy((void*)(d_spheres), (void*)(&(h_spheres[0])), h_spheres.size()*sizeof(Sphere), cudaMemcpyHostToDevice); Box* d_boxes; cudaMalloc((void**)(&d_boxes), h_boxes.size()*sizeof(Box)); cudaMemcpy((void*)(d_boxes), (void*)(&(h_boxes[0])), h_boxes.size()*sizeof(Box), cudaMemcpyHostToDevice); Triangle* d_triangles; cudaMalloc((void**)(&d_triangles), triangles_ordered.size()*sizeof(Triangle)); cudaMemcpy((void*)(d_triangles), (void*)(&(triangles_ordered[0])), triangles_ordered.size()*sizeof(Triangle), cudaMemcpyHostToDevice); compact_BVHnode* d_bvh; cudaMalloc((void**)(&d_bvh), compact_BVH.size()*sizeof(compact_BVHnode)); cudaMemcpy((void*)(d_bvh), (void*)(&(compact_BVH[0])), compact_BVH.size()*sizeof(compact_BVHnode), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(const_textures, &d_const, sizeof(const_text*)); cudaMemcpyToSymbol(check_textures, &d_check, sizeof(check_text*)); cudaMemcpyToSymbol(pix_textures, &d_pix, sizeof(pix_text*)); 
cudaMemcpyToSymbol(textures, &d_text, sizeof(float3*)); cudaMemcpyToSymbol(spheres, &d_spheres, sizeof(Sphere*)); cudaMemcpyToSymbol(boxes, &d_boxes, sizeof(Box*)); cudaMemcpyToSymbol(all_triangles, &d_triangles, sizeof(Triangle*)); cudaMemcpyToSymbol(bvh, &d_bvh, sizeof(compact_BVHnode*)); uint* d_spheres_size; uint* d_boxes_size; uint* d_triangles_size; uint* d_bvh_size; uint ss = h_spheres.size(); uint sb = h_boxes.size(); uint st = triangles_ordered.size(); uint sh = compact_BVH.size(); cudaMalloc((void**)(&d_spheres_size), sizeof(int)); cudaMalloc((void**)(&d_boxes_size), sizeof(int)); cudaMalloc((void**)(&d_triangles_size), sizeof(int)); cudaMalloc((void**)(&d_bvh_size), sizeof(int)); cudaMemcpy((void*)d_spheres_size, &ss, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy((void*)d_boxes_size, &sb, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy((void*)d_triangles_size, &st, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy((void*)d_bvh_size, &sh, sizeof(int), cudaMemcpyDeviceToHost); cudaMemcpyToSymbol(num_spheres, d_spheres_size, sizeof(int)); cudaMemcpyToSymbol(num_boxes, d_boxes_size, sizeof(int)); cudaMemcpyToSymbol(num_triangles, d_triangles_size, sizeof(int)); cudaMemcpyToSymbol(num_nodes, d_bvh_size, sizeof(int)); uint* d_width; uint* d_height; uint* d_samples; cudaMalloc((void**)(&d_width), sizeof(int)); cudaMalloc((void**)(&d_height), sizeof(int)); cudaMalloc((void**)(&d_samples), sizeof(int)); cudaMemcpy(d_width, &h_width, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_height, &h_height, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpy(d_samples, &h_samples, sizeof(int), cudaMemcpyHostToDevice); cudaMemcpyToSymbol(width, d_width, sizeof(int)); cudaMemcpyToSymbol(height, d_height, sizeof(int)); cudaMemcpyToSymbol(samples, d_samples, sizeof(int)); std::cout << "Initialized " << triangles_ordered.size() << " triangles" << std::endl; } // recursively intersect ray with bvh to find triangle in sublinear time // I really need to give each mesh a separate bvh but I haven't yet __device__ void intersect_triangles(const Ray& r_in, float& t, float& dist, int& id){ int stack[64]; // its reasonable to assume this will be way bigger than neccesary int stack_idx = 1; stack[0] = 0; float d; float tb = -1e20; // large negative while(stack_idx){ int boxidx = stack[stack_idx - 1]; // pop off top of stack stack_idx --; if(!(bvh[boxidx].u.leaf.count & 0x80000000)){ // inner Box b; b.min = bvh[boxidx].min; b.max = bvh[boxidx].max; if (b.intersect(r_in)){ stack[stack_idx++] = bvh[boxidx].u.inner.left; // push right and left onto stack stack[stack_idx++] = bvh[boxidx].u.inner.right; } } else{ // leaf for (int i = bvh[boxidx].u.leaf.offset; i < bvh[boxidx].u.leaf.offset + (bvh[boxidx].u.leaf.count & 0x7fffffff); i++){ // intersect all triangles in this box if ((d = all_triangles[i].intersect(r_in)) && d > -1e19){ if(d<t && d>0.001){ t=d; id = i; } else if(d>tb && d<0.001){ tb = d; } } } } } dist = t - tb; if (tb < -1e19) dist = 0; // nothing intersected } // find first thing that the ray hits __device__ bool intersect_scene(const Ray& r_in, Hit_data& dat){ int n = num_spheres; float t = 1e20; float d; int id; dat.dist = 1; dat.t = 1e20; float dist, dist_save; for(int i = int(n); i--; ){ // intersect spheres if ((d = spheres[i].intersect(r_in, dist)) && d<t && d > 0){ t = d; id = i; dist_save = dist; } } // only update dat at most once per geometry type if (t < dat.t){ dat.p = r_in.origin + t*r_in.direction; dat.t = t; dat.scat = spheres[id].scat; dat.text = spheres[id].text; dat.normal = 
normalize(dat.p - spheres[id].center); dat.texture_num = spheres[id].texture_num; dat.uv = make_float2(0.5 + atan(dat.normal.z/dat.normal.x)/2/M_PI, 0.5 - asin(dat.normal.y)/2/M_PI); dat.shift = spheres[id].radius/100000; t = dat.t; if (dist_save < 1e19) dat.dist = dist_save; else dat.dist = 0; // shouldn't happen. means nothing hit } n = num_boxes; for(int i = int(n); i--; ){ // intersect boxes if ((d = boxes[i].intersect(r_in)) && d<t && d>0){ t=d; id = i; } } if (t<dat.t){ dat.p = r_in.origin + t*r_in.direction; dat.t = t; dat.scat = boxes[id].scat; dat.text = boxes[id].text; dat.normal = boxes[id].normal_at(r_in.origin + t*r_in.direction); dat.texture_num = boxes[id].texture_num; dat.uv = boxes[id].uv_at(dat.p); dat.shift = 0; t = dat.t; } intersect_triangles(r_in, t, dist, id); if (t<dat.t){ dat.p = r_in.origin + t*r_in.direction; dat.t = t; dat.scat = all_triangles[id].scat; dat.text = all_triangles[id].text; dat.normal = all_triangles[id].normal; dat.texture_num = all_triangles[id].texture_num; dat.uv = all_triangles[id].uv_at(dat.p); dat.dist = dist; dat.shift = 0; } return dat.t<1e20; // return whether anying was hit } __device__ float3 radiance(Ray& r_in, curandState* randstate){ float3 color = make_float3(0.0, 0.0, 0.0); //accumulated total light float3 mask; //accumulated color mask float idx_refr; #ifdef DISPERSION // do each rgb separately // very subtle for most scenes and very slow if (r_in.light == RED){ mask = make_float3(1.0,0.0,0.0); idx_refr = 1.5; } else if (r_in.light == GREEN){ mask = make_float3(0.0,1.0,0.0); idx_refr = 1.52; } else{ mask = make_float3(0.0,0.0,1.0); idx_refr = 1.54; } #else mask = make_float3(1.0,1.0,1.0); idx_refr = 1.5; #endif // loop is faster than recursion and easy to do here for (int bounces = 0; bounces < 40; ++bounces){ Hit_data dat; if(!intersect_scene(r_in, dat)){ // if ray doesn't hit anything float t = (r_in.direction.y + 1)/2; return color + mask*(make_float3(1)*(1-t) + make_float3(0.5,0.7,1)*t); // gradient sky } // normal pointing in correct way float3 nl = dot(dat.normal, r_in.direction) < 0 ? dat.normal : dat.normal*-1; float3 d; float3 col; float3 emission; float dist_traveled = dat.t; // for everything except volumes float3 atten = r_in.attenuation; // color doesn't depend on point if(dat.text == CONST){ col = const_textures[dat.texture_num].color; emission = const_textures[dat.texture_num].emission; } // checkered using uv map if(dat.text == CHECK){ check_text ct = check_textures[dat.texture_num]; emission = ct.emission; float n = sinf(dat.uv.x*ct.scale.x)*sinf(dat.uv.y*ct.scale.y); if (n<0) col = ct.color1; else col = ct.color2; } // image texture using uv if(dat.text == PIX){ pix_text pt = pix_textures[dat.texture_num]; col = pt.color_at(dat.uv); } // perfectly diffuse if(dat.scat == DIFF){\ // pick random direction float theta = 2*M_PI*curand_uniform(randstate); float cosphi = curand_uniform(randstate); float sinphi = sqrtf(1-cosphi*cosphi); float3 w = nl; float3 u = normalize(cross((fabs(w.x) > 0.0001 ? 
make_float3(0,1,0) : make_float3(1,0,0)), w)); float3 v = cross(w,u); //rotate new ray to correct hemisphere d = normalize(u*cosf(theta)*sinphi + v*sinf(theta)*sinphi + w*cosphi); } // perfectly specular reflection if (dat.scat == SPEC){ d = r_in.direction - 2.0*dat.normal*dot(dat.normal,r_in.direction); } // either perfect reflection or perfect refraction depending on Schlick's approxomation if (dat.scat == REFR){ float3 reflected = r_in.direction - 2.0*dat.normal*dot(dat.normal,r_in.direction); float ratio; float3 refracted; float reflect_prob; float cosine; if(dot(r_in.direction, dat.normal) > 0){// ray is entering ratio = idx_refr; cosine = idx_refr*dot(r_in.direction, dat.normal); } else{ // ray is leaving ratio = 1.0/idx_refr; cosine = -dot(r_in.direction, dat.normal); } // Schlick's approxomation float dt = dot(r_in.direction, nl); float disc = 1.0 - ratio*ratio*(1-dt*dt); if(disc > 0){ refracted = ratio*(r_in.direction - dt*nl) - sqrtf(disc)*nl; float r0 = (1 - idx_refr) / (1 + idx_refr); r0 = r0*r0; float c = 1-cosine; reflect_prob = r0+(1-r0)*c*c*c*c*c; } else{ reflect_prob = 1.0; } if(curand_uniform(randstate) < reflect_prob) d = reflected; else{ d = refracted; // change media attenuation color if (dot(r_in.direction, dat.normal) < 0){ //entering r_in.attenuation.x = col.x; r_in.attenuation.y = col.y; r_in.attenuation.z = col.z; } else{//leaving r_in.attenuation.x = 1; r_in.attenuation.y = 1; r_in.attenuation.z = 1; } dat.shift = 0; } col = make_float3(1); } // metal material from Peter Shirley's Ray Tracing in one Weekend if(dat.scat == METAL){ float phi = 2*M_PI*curand_uniform(randstate); float r = curand_uniform(randstate); float exponent = 10; float cosTheta = powf(1-r, 1.0/(exponent+1)); float sinTheta = sqrtf(1-cosTheta*cosTheta); float3 w = normalize(r_in.direction - 2.0*dat.normal*dot(dat.normal,r_in.direction)); float3 u = cross((fabs(w.x) > 0.001?make_float3(0,1,0):make_float3(1,0,0)),w); float3 v = cross(w,u); d = normalize(u*cosf(phi)*sinTheta + v*sinf(phi)*sinTheta + w*cosTheta); } // like glass except diffuse instead of refract // looks like glazed ceramic if (dat.scat == COAT){ // Schlick's approxomation float c = 1 + idx_refr*dot(r_in.direction, nl); float r0 = (1 - idx_refr) / (1 + idx_refr); r0 = r0*r0; float reflect_prob = r0+(1-r0)*c*c*c*c*c; if(curand_uniform(randstate)<reflect_prob){ d = r_in.direction - 2.0*dat.normal*dot(dat.normal,r_in.direction); col = make_float3(1); } else{ float theta = 2*M_PI*curand_uniform(randstate); float cosphi = curand_uniform(randstate); float sinphi = sqrtf(1-cosphi*cosphi); float3 w = nl; float3 u = normalize(cross((fabs(w.x) > 0.0001 ? 
make_float3(0,1,0) : make_float3(1,0,0)), w)); float3 v = cross(w,u); d = normalize(u*cosf(theta)*sinphi + v*sinf(theta)*sinphi + w*cosphi); } } // calculate distance traveled based on particle density if (dat.scat == VOLUME){ float density = const_textures[dat.texture_num].density; float dist = -(1/density)*logf(curand_uniform(randstate)); if(dot(r_in.direction, dat.normal) > 0){ // ray started inside if (dist < dat.t){ // ray ends inside // reflect in random direction float theta = curand_uniform(randstate)*2*M_PI; float cosphi = 2*curand_uniform(randstate) - 1; float sinphi = sqrtf(1 - cosphi*cosphi); d = normalize(make_float3(cosf(theta)*sinphi, sinf(theta)*sinphi, cosphi)); dat.p = r_in.origin + dist*r_in.direction; // origin of new ray dist_traveled = dist; // ray didn't go all the way to intersection } else{ // continue in same direction d = r_in.direction; col = make_float3(1); } } else{ // ray started outside d = r_in.direction; col = make_float3(1); } dat.shift = 0; // dont move ray origin } // attenuation due to media float r = __expf(-1*(1-atten.x)*dist_traveled); float g = __expf(-1*(1-atten.y)*dist_traveled); float b = __expf(-1*(1-atten.z)*dist_traveled); mask = mask*make_float3(r,g,b); color += mask*emission; // if object emits light add to total mask = mask * col; // update color mask // new ray r_in.origin = dat.p + nl*dat.shift; r_in.direction = d; if (mask.x + mask.y + mask.z < 0.01) break; // if ray has lost most energy no need to continue } return color; } // call from host __global__ void render_kernel(float3 *output){ // coordinates of pixel uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; // index in output array uint i = (height - y - 1)*width + x; uint threadId = (blockIdx.x+blockIdx.y*gridDim.x)* (blockDim.x*blockDim.y) + (threadIdx.y*blockDim.x) + threadIdx.x; curandState randstate; curand_init(threadId,0,0,&randstate); //initialize host //eventually I would like to make this interactive so this camera stuff is here float3 cam = make_float3(0,15,40); float3 look_at = make_float3(0,15,0); float3 w = normalize(cam - look_at); float3 u = cross(make_float3(0,1,0),w); float3 v = cross(w,u); float focal_length = length(cam - look_at); float aperture = 20; float lens_radius = 0.5; float he = tanf(aperture/2); float wi = he*width/height; float3 screen_corner = cam - wi*focal_length*u - he*focal_length*v - focal_length*w; float3 horiz = 2*wi*focal_length*u; float3 vert = 2*he*focal_length*v; float3 r = make_float3(0.0); float3 d; for (int s = 0; s<samples; ++s){ // new ray for each sample float theta = 2*M_PI*curand_uniform(&randstate); float rad = lens_radius*curand_uniform(&randstate); float3 from = cam + rad*(u*cosf(theta) + v*sinf(theta)); // slight random for depth of field float xs = curand_uniform(&randstate); float ys = curand_uniform(&randstate); d = normalize(screen_corner + horiz*(float(x+xs)/float(width)) + vert*(float(y+ys)/float(height)) - from); light_type l = ((s%3==0)?RED:((s%3==1)?GREEN:BLUE)); // only used for dispersion Ray init_ray = Ray(from, d, make_float3(1,1,1), l); r = r + radiance(init_ray,&randstate); } r = r/samples; #ifdef DISPERSION r = r*3; #endif output[i] = r; } // utilities to turn light vals into image inline float clamp(float x){return x<0.0? 0.0: x>1.0 ? 
1.0 : x;} inline int to_int(float x){return int(pow(clamp(x), 1/2.2)*255 + 0.5);} // apply gausian blur to very bright pixels to get a bloom effect void bloom(float3* in, float3* out, int radius, float stddev){ float kernel[(2*radius+1)*(2*radius+1)]; float denom = 2*stddev*stddev; for (int y = -radius; y<=radius; y++){ for (int x = -radius; x<=radius; x++){ kernel[(y+radius)*(2*radius+1)+x+radius] = exp(-(x*x+y*y)/denom)/denom/M_PI; } } float3* temp = new float3[h_height*h_width]; for (int i = 0; i<h_width*h_height; i++){ float r,g,b; r=g=b=0; if (in[i].x > 1) r = in[i].x - 1; if (in[i].y > 1) g = in[i].y - 1; if (in[i].z > 1) b = in[i].z - 1; temp[i] = make_float3(r,g,b); } for (int y = 0; y<h_height; y++){ for(int x = 0; x<h_width; x++){ float3 c = make_float3(0); for (int ky = -radius; ky<=radius; ky++){ for (int kx = -radius; kx<=radius; kx++){ if ((y+ky)>=0 && (y+ky)<h_height && (x+kx)>=0 && (x+kx)<h_width){ c = c+temp[(y+ky)*h_width+(x+kx)]*kernel[(ky+radius)*(2*radius+1)+kx+radius]; } } } out[y*h_width+x] = c/20 + in[y*h_width + x]; } } delete[] temp; } int main(){ // time the various parts of computation std::chrono::time_point<std::chrono::system_clock> t0,t1,t2,t3; t0 = std::chrono::system_clock::now(); init_scene(); float3* output_h = new float3[h_width*h_height]; float3* output_d; cudaMalloc(&output_d, h_width*h_height*sizeof(float3)); dim3 block(8,8,1); dim3 grid(h_width/block.x, h_height/block.y, 1); std::cout << "CUDA initialized" << std::endl; std::cout << "Start rendering..." << std::endl; t1 = std::chrono::system_clock::now(); render_kernel<<<grid, block>>>(output_d); cudaDeviceSynchronize(); cudaMemcpy(output_h, output_d, h_width*h_height*sizeof(float3), cudaMemcpyDeviceToHost); cudaFree(output_d); t2 = std::chrono::system_clock::now(); std::cout << "Done" << std::endl; float3* out = new float3[h_width*h_height]; bloom(output_h, out, bloom_rad, 3); std::vector<unsigned char> image(h_width*h_height*4); for(int y = 0; y<h_height; y++){ for(int x = 0; x<h_width; x++){ int idx = (y*h_width + x); image[4*idx + 0] = to_int(out[idx].x); image[4*idx + 1] = to_int(out[idx].y); image[4*idx + 2] = to_int(out[idx].z); image[4*idx + 3] = 255; } } lodepng::encode("test.png", image, h_width, h_height); // save image std::cout << "Saved image" << std::endl; delete[] output_h; t3 = std::chrono::system_clock::now(); std::chrono::duration<double> init_time = t1 - t0; std::chrono::duration<double> kernel_time = t2 - t1; std::chrono::duration<double> post_time = t3 - t2; double is = init_time.count(); double ks = kernel_time.count(); double ps = post_time.count(); int im = is/60; int km = ks/60; int pm = ps/60; is -= 60*im; ks -= 60*km; ps -= 60*pm; std::cout << std::endl; std::cout << "Initialization time: " << im << "m" << is << "s" << std::endl; std::cout << "Render time: " << km << "m" << ks << "s" << std::endl; std::cout << "Post process time: " << pm << "m" << ps << "s" << std::endl; return 0; }
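In both init_scene versions above, the scene counts and image parameters are first copied into freshly allocated device buffers, and those device pointers are then handed to cudaMemcpyToSymbol/hipMemcpyToSymbol; along the way the d_bvh_size copy uses the DeviceToHost flag even though it moves a host value into device memory. The fragment below is a sketch of the more direct pattern (an assumption about the intended behaviour, not a dataset row), written against the CUDA names and the variables already declared in the file: MemcpyToSymbol copies from a host pointer by default, so the staging buffers can be dropped.

// Sketch, not dataset content: write the __constant__ symbols straight from host values.
// cudaMemcpyToSymbol defaults to cudaMemcpyHostToDevice, so no staging buffers are needed
// and the stray cudaMemcpyDeviceToHost flag on the d_bvh_size copy above disappears.
uint n_spheres   = h_spheres.size();
uint n_boxes     = h_boxes.size();
uint n_triangles = triangles_ordered.size();
uint n_bvh_nodes = compact_BVH.size();
cudaMemcpyToSymbol(num_spheres,   &n_spheres,   sizeof(uint));
cudaMemcpyToSymbol(num_boxes,     &n_boxes,     sizeof(uint));
cudaMemcpyToSymbol(num_triangles, &n_triangles, sizeof(uint));
cudaMemcpyToSymbol(num_nodes,     &n_bvh_nodes, sizeof(uint));
cudaMemcpyToSymbol(width,   &h_width,   sizeof(int));
cudaMemcpyToSymbol(height,  &h_height,  sizeof(int));
cudaMemcpyToSymbol(samples, &h_samples, sizeof(int));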
db2f3676b4f53f3f41cfe7f1ce6eb03aa430f194.hip
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <math.h> // For the CUDA runtime routines (prefixed with "cuda_") #include <hip/hip_runtime.h> #include <cv.h> #include <cxcore.h> #include <highgui.h> #include <ctime> #include <cstdlib> #include <iostream> using namespace cv; using namespace std; int threads; int kernel_size; int indexes_count; int blocks_per_grid; //function to convert to string template <typename T> string Str(const T & t) { ostringstream os; os << t; return os.str(); } /* function to safely manage error un cuda memory allocation */ static inline void _safe_cuda_call(hipError_t err, const char* msg, const char* file_name, const int line_number) { if(err != hipSuccess) { fprintf(stderr,"%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n", msg,file_name, line_number, hipGetErrorString(err)); std::cin.get(); exit(EXIT_FAILURE); } } #define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__) /* function to compute blur */ __global__ void blur_img_kernel(short int* dRed, short int* dGreen, short int* dBlue, bool* dEdited, int cols, int rows, int kernel, int blockSize) { //Thread index const int xIndex = blockIdx.x * blockDim.x + threadIdx.x; //variables to allocate the edges of the working thread int startBlockX = xIndex * blockSize; int endBlockX = (xIndex + 1) * blockSize; //variables to allocate the edges of the kernel int startX = 0; int startY = 0; int endX = cols; int endY = rows; //Index to modify int vectIndex = 0; //variable to iterate over the kernel indexes int subindex = 0; //variables to compute pixel sum inside the kernel (Red, Green, Blue) double averageR = 0; double averageG = 0; double averageB = 0; //cumpute the total of pixels (technically kernel) double total = 0; //handling error on overflow because of the last block exceeds number of cols if(endBlockX > cols) endBlockX = cols; //iterate over the block of the working thread for(int i = startBlockX; i < endBlockX; i++) { for(int j = 0; j < rows; j++) { total = 0; subindex = 0; vectIndex = i + (j * cols); averageR = 0; averageG = 0; averageB = 0; if(dEdited[vectIndex] == true) continue; //control if the pixel was modified by another thread //Region compute kernel edges startX = 0; if(i - (kernel - 1) / 2 > 0) startX = i - (kernel - 1) / 2; startY = 0; if(j - (kernel - 1) / 2 > 0) startY = j - (kernel - 1) / 2; endX = cols; if(i + (kernel - 1) / 2 < cols) endX = i + (kernel - 1) / 2; endY = rows; if(j + (kernel - 1) / 2 < rows) endY = j + (kernel - 1) / 2; //End Region //if kernel is equal to 1 return the pixel as it is if(kernel == 1) { averageR = dRed[vectIndex]; averageG = dGreen[vectIndex]; averageB = dBlue[vectIndex]; total = 1; } //iterate over the kernel computing pixel RGB sums and total of pixels for(int k = startX; k <= endX; k++) { for(int k2 = startY; k2 <= endY; k2++) { subindex = k + (k2 * cols); averageR = averageR + (double)dRed[subindex]; averageG = averageG + (double)dGreen[subindex]; averageB = averageB + (double)dBlue[subindex]; total = total + 1; } } //assign average value per channel of color (RGB) dRed[vectIndex] = (short int)(averageR / total); dGreen[vectIndex] = (short int)(averageG / total); dBlue[vectIndex] = (short int)(averageB / total); //flag to check the current pixel as modified dEdited[vectIndex] = true; } } } /* function to allocate memory, call cuda blur fuction and apply to output the blur effect */ void make_blur(const cv::Mat& input, cv::Mat& output) { //input variables int rows = input.rows; int cols = input.cols; 
int kernel = kernel_size; //size of the image (number of pixels int imgSize = input.rows * input.cols; //number of parallel tasks int indexes = blocks_per_grid * threads; //size of the block taken of the image per task int blockSize = ceil((double)cols / (double)indexes); //variables to store RGB pixels in host memory short int *h_red = new short int[imgSize]; short int *h_green = new short int[imgSize]; short int *h_blue = new short int[imgSize]; //variables to store RGB pixels in device memory short int *d_red, *d_green, *d_blue; //variable to know if a pixel was modified in host memory bool *h_edited = new bool[imgSize]; //variable to know if a pixel was modified in device memory bool *d_edited; Mat inputCopy = input.clone(); //size of a pixel color array int colorSize = sizeof(short int) * imgSize; //size of a boolean array of the image int editedSize = sizeof(bool) * imgSize; //iterate over the image pixel to initialize the data of host memory int index = 0; for(int i = 0; i < cols; i++) { for(int j = 0; j < rows; j++) { index = i + (j * cols); Vec3b vect = inputCopy.at<Vec3b>(Point(i, j)); h_red[index] = (short int)vect[0]; h_green[index] = (short int)vect[1]; h_blue[index] = (short int)vect[2]; h_edited[index] = false; } } inputCopy.release(); //Region allocate host memory data in device memory SAFE_CALL(hipMalloc<short int>(&d_red, colorSize), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc<short int>(&d_green, colorSize), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc<short int>(&d_blue, colorSize), "CUDA Malloc Failed"); SAFE_CALL(hipMalloc<bool>(&d_edited, editedSize), "CUDA Malloc Failed"); //End Region //Region copy data from OpenCV input image to device memory SAFE_CALL(hipMemcpy(d_red, h_red, colorSize, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed"); SAFE_CALL(hipMemcpy(d_green, h_green, colorSize, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed"); SAFE_CALL(hipMemcpy(d_blue, h_blue, colorSize, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed"); SAFE_CALL(hipMemcpy(d_edited, h_edited, editedSize, hipMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed"); //End Region int threads_per_block = threads / blocks_per_grid; //launch the blur conversion kernel hipLaunchKernelGGL(( blur_img_kernel), dim3(blocks_per_grid), dim3(threads_per_block), 0, 0, d_red, d_green, d_blue, d_edited, cols, rows, kernel, blockSize); //synchronize tu check errors in any kernel SAFE_CALL(hipDeviceSynchronize(), "Kernel Launch Failed"); //Region retrieve memory from device to host SAFE_CALL(hipMemcpy(h_red, d_red, colorSize, hipMemcpyDeviceToHost), "CUDA Memcpy Host To Device Failed"); SAFE_CALL(hipMemcpy(h_green, d_green, colorSize, hipMemcpyDeviceToHost), "CUDA Memcpy Host To Device Failed"); SAFE_CALL(hipMemcpy(h_blue, d_blue, colorSize, hipMemcpyDeviceToHost), "CUDA Memcpy Host To Device Failed"); //End Region //Region free the device memory SAFE_CALL(hipFree(d_red), "CUDA Free Failed"); SAFE_CALL(hipFree(d_green), "CUDA Free Failed"); SAFE_CALL(hipFree(d_blue), "CUDA Free Failed"); SAFE_CALL(hipFree(d_edited), "CUDA Free Failed"); //End Region //Region create output image Vec3d outVect; int index2 = 0; //iterate over image size to assign pixel color results in output for(int io = 0; io < cols; io++) { for(int jo = 0; jo < rows; jo++) { index2 = io + (jo * cols); outVect = Vec3d((double)h_red[index2]/255.0, (double)h_green[index2]/255.0, (double)h_blue[index2]/255.0); output.at<Vec3d>(Point(io, jo)) = outVect; } } //End Region //Region free host memory free(h_red); 
free(h_green); free(h_blue); free(h_edited); //End Region } int main(int argc, char** argv) { //if there was an error with the input parameters if(argc != 5) { cout << "Missing or incorrect input parameters" << endl; cout << "Params:" << endl; cout << "1. image name: example mario.jpg" << endl; cout << "2. kernel: size odd number example: 17" << endl; cout << "3. threads: number of gpu threads that will be used example: 192" << endl; cout << "4. block2: number of gpu blocks that will be used example: 2" << endl; cout << "5. is testing: 0 to display images 1 to enable testing mode" << endl; return 0; } //variable to store input image name char* image_name = (char *)malloc(sizeof(char) * 256); //flag to set testing mode(not display image) int isTesting = 0; //Region capture input params sscanf(argv[1], "%s", image_name); sscanf(argv[2], "%i", &kernel_size); sscanf(argv[3], "%i", &threads); sscanf(argv[4], "%i", &blocks_per_grid); sscanf(argv[5], "%i", &isTesting); //End Region //variable to store start time int start_s = clock(); //Region reading input image in img folder Mat input = imread(Str("img/") + image_name, 1); if (input.empty()) { cout << "error: image not read from file\n\n"; return(0); } //End Region //validate that kernel is odd if (kernel_size % 2 == 0) { cout << "error: arg 2 kernel size must be odd\n\n"; return(0); } //validate that number of threads is divisible on the number of blocks if (threads % blocks_per_grid != 0) { cout << Str(threads % blocks_per_grid) + " error: args 3 and 4 number of threads(3) must be divisible in blocks per grid(4)\n\n"; return(0); } //width and height of the image int rows = input.rows; int cols = input.cols; //create Mat for output image cv::Mat output(rows, cols, CV_64FC3); //launch function to apply blur make_blur(input, output); //if testing mode is disabled then show images if(isTesting == 0) { namedWindow("Input", CV_WINDOW_NORMAL); namedWindow("Output", CV_WINDOW_NORMAL); cv::imshow("Input", input); cv::imshow("Output",output); cv::waitKey(); } //variable to store end time int stop_s = clock(); //free images memory input.release(); output.release(); //print performance information cout << cols << "x"; cout << rows << "\t"; cout << threads << "\t"; cout << blocks_per_grid << "\t"; cout << kernel_size << "\t"; cout << (stop_s-start_s)/double(CLOCKS_PER_SEC)*1000 << " ms" << endl; return 0; }
db2f3676b4f53f3f41cfe7f1ce6eb03aa430f194.cu
#include <stdio.h>
#include <math.h>

// For the CUDA runtime routines (prefixed with "cuda_")
#include <cuda_runtime.h>

#include <cv.h>
#include <cxcore.h>
#include <highgui.h>

#include <ctime>
#include <cstdlib>
#include <iostream>
#include <sstream> // required for ostringstream used in Str()

using namespace cv;
using namespace std;

int threads;
int kernel_size;
int indexes_count;
int blocks_per_grid;

//function to convert a value to string
template <typename T>
string Str(const T & t)
{
    ostringstream os;
    os << t;
    return os.str();
}

/* function to safely handle errors in cuda memory allocation and other cuda calls */
static inline void _safe_cuda_call(cudaError err, const char* msg, const char* file_name, const int line_number)
{
    if(err != cudaSuccess)
    {
        fprintf(stderr, "%s\n\nFile: %s\n\nLine Number: %d\n\nReason: %s\n", msg, file_name, line_number, cudaGetErrorString(err));
        std::cin.get();
        exit(EXIT_FAILURE);
    }
}

#define SAFE_CALL(call,msg) _safe_cuda_call((call),(msg),__FILE__,__LINE__)

/* kernel to compute the blur */
__global__ void blur_img_kernel(short int* dRed, short int* dGreen, short int* dBlue, bool* dEdited,
                                int cols, int rows, int kernel, int blockSize)
{
    //Thread index
    const int xIndex = blockIdx.x * blockDim.x + threadIdx.x;

    //edges of the column block assigned to this thread
    int startBlockX = xIndex * blockSize;
    int endBlockX = (xIndex + 1) * blockSize;

    //edges of the kernel window
    int startX = 0;
    int startY = 0;
    int endX = cols - 1;
    int endY = rows - 1;

    //index of the pixel to modify
    int vectIndex = 0;
    //index used to iterate over the kernel window
    int subindex = 0;

    //accumulators for the pixel sums inside the kernel (Red, Green, Blue)
    double averageR = 0;
    double averageG = 0;
    double averageB = 0;
    //number of pixels in the window (at most kernel²)
    double total = 0;

    //handle overflow when the last block exceeds the number of cols
    if(endBlockX > cols)
        endBlockX = cols;

    //iterate over the column block assigned to this thread
    for(int i = startBlockX; i < endBlockX; i++)
    {
        for(int j = 0; j < rows; j++)
        {
            total = 0;
            subindex = 0;
            vectIndex = i + (j * cols);
            averageR = 0;
            averageG = 0;
            averageB = 0;

            if(dEdited[vectIndex] == true)
                continue; //skip if the pixel was already modified by another thread

            //Region compute kernel edges
            startX = 0;
            if(i - (kernel - 1) / 2 > 0)
                startX = i - (kernel - 1) / 2;

            startY = 0;
            if(j - (kernel - 1) / 2 > 0)
                startY = j - (kernel - 1) / 2;

            endX = cols - 1; //clamp to the last valid column (the loops below are inclusive)
            if(i + (kernel - 1) / 2 < cols)
                endX = i + (kernel - 1) / 2;

            endY = rows - 1; //clamp to the last valid row
            if(j + (kernel - 1) / 2 < rows)
                endY = j + (kernel - 1) / 2;
            //End Region

            //if kernel is equal to 1, return the pixel as it is
            if(kernel == 1)
            {
                averageR = dRed[vectIndex];
                averageG = dGreen[vectIndex];
                averageB = dBlue[vectIndex];
                total = 1;
            }

            //iterate over the kernel window accumulating the RGB sums and the pixel count
            for(int k = startX; k <= endX; k++)
            {
                for(int k2 = startY; k2 <= endY; k2++)
                {
                    subindex = k + (k2 * cols);
                    averageR = averageR + (double)dRed[subindex];
                    averageG = averageG + (double)dGreen[subindex];
                    averageB = averageB + (double)dBlue[subindex];
                    total = total + 1;
                }
            }

            //assign the average value per color channel (RGB)
            dRed[vectIndex] = (short int)(averageR / total);
            dGreen[vectIndex] = (short int)(averageG / total);
            dBlue[vectIndex] = (short int)(averageB / total);

            //flag the current pixel as modified
            dEdited[vectIndex] = true;
        }
    }
}

/* function to allocate memory, call the cuda blur kernel and write the blur effect to the output */
void make_blur(const cv::Mat& input, cv::Mat& output)
{
    //input variables
    int rows = input.rows;
    int cols = input.cols;
    int kernel = kernel_size;

    //size of the image (number of pixels)
    int imgSize = input.rows * input.cols;
    //number of parallel tasks
    int indexes = blocks_per_grid * threads;
    //size of the block of columns taken from the image per task
    int blockSize = ceil((double)cols / (double)indexes);

    //variables to store RGB pixels in host memory
    short int *h_red = new short int[imgSize];
    short int *h_green = new short int[imgSize];
    short int *h_blue = new short int[imgSize];
    //variables to store RGB pixels in device memory
    short int *d_red, *d_green, *d_blue;

    //flags that record whether a pixel was modified, in host memory
    bool *h_edited = new bool[imgSize];
    //flags that record whether a pixel was modified, in device memory
    bool *d_edited;

    Mat inputCopy = input.clone();

    //size of a pixel color array in bytes
    int colorSize = sizeof(short int) * imgSize;
    //size of a boolean array of the image in bytes
    int editedSize = sizeof(bool) * imgSize;

    //iterate over the image pixels to initialize the host memory
    int index = 0;
    for(int i = 0; i < cols; i++)
    {
        for(int j = 0; j < rows; j++)
        {
            index = i + (j * cols);
            Vec3b vect = inputCopy.at<Vec3b>(Point(i, j));
            h_red[index] = (short int)vect[0];
            h_green[index] = (short int)vect[1];
            h_blue[index] = (short int)vect[2];
            h_edited[index] = false;
        }
    }

    inputCopy.release();

    //Region allocate device memory
    SAFE_CALL(cudaMalloc<short int>(&d_red, colorSize), "CUDA Malloc Failed");
    SAFE_CALL(cudaMalloc<short int>(&d_green, colorSize), "CUDA Malloc Failed");
    SAFE_CALL(cudaMalloc<short int>(&d_blue, colorSize), "CUDA Malloc Failed");
    SAFE_CALL(cudaMalloc<bool>(&d_edited, editedSize), "CUDA Malloc Failed");
    //End Region

    //Region copy data from the OpenCV input image to device memory
    SAFE_CALL(cudaMemcpy(d_red, h_red, colorSize, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
    SAFE_CALL(cudaMemcpy(d_green, h_green, colorSize, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
    SAFE_CALL(cudaMemcpy(d_blue, h_blue, colorSize, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
    SAFE_CALL(cudaMemcpy(d_edited, h_edited, editedSize, cudaMemcpyHostToDevice), "CUDA Memcpy Host To Device Failed");
    //End Region

    int threads_per_block = threads / blocks_per_grid;

    //launch the blur kernel
    blur_img_kernel<<<blocks_per_grid, threads_per_block>>>(d_red, d_green, d_blue, d_edited, cols, rows, kernel, blockSize);

    //synchronize to check errors in any kernel
    SAFE_CALL(cudaDeviceSynchronize(), "Kernel Launch Failed");

    //Region retrieve memory from device to host
    SAFE_CALL(cudaMemcpy(h_red, d_red, colorSize, cudaMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");
    SAFE_CALL(cudaMemcpy(h_green, d_green, colorSize, cudaMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");
    SAFE_CALL(cudaMemcpy(h_blue, d_blue, colorSize, cudaMemcpyDeviceToHost), "CUDA Memcpy Device To Host Failed");
    //End Region

    //Region free the device memory
    SAFE_CALL(cudaFree(d_red), "CUDA Free Failed");
    SAFE_CALL(cudaFree(d_green), "CUDA Free Failed");
    SAFE_CALL(cudaFree(d_blue), "CUDA Free Failed");
    SAFE_CALL(cudaFree(d_edited), "CUDA Free Failed");
    //End Region

    //Region create output image
    Vec3d outVect;
    int index2 = 0;
    //iterate over the image assigning the pixel color results to the output
    for(int io = 0; io < cols; io++)
    {
        for(int jo = 0; jo < rows; jo++)
        {
            index2 = io + (jo * cols);
            outVect = Vec3d((double)h_red[index2]/255.0, (double)h_green[index2]/255.0, (double)h_blue[index2]/255.0);
            output.at<Vec3d>(Point(io, jo)) = outVect;
        }
    }
    //End Region

    //Region free host memory (allocated with new[], so delete[] is required)
    delete[] h_red;
    delete[] h_green;
    delete[] h_blue;
    delete[] h_edited;
    //End Region
}

int main(int argc, char** argv)
{
    //the program expects 5 arguments plus the program name
    if(argc != 6)
    {
        cout << "Missing or incorrect input parameters" << endl;
        cout << "Params:" << endl;
        cout << "1. image name: example mario.jpg" << endl;
        cout << "2. kernel: size odd number example: 17" << endl;
        cout << "3. threads: number of gpu threads that will be used example: 192" << endl;
        cout << "4. blocks: number of gpu blocks that will be used example: 2" << endl;
        cout << "5. is testing: 0 to display images 1 to enable testing mode" << endl;
        return 0;
    }

    //variable to store the input image name
    char* image_name = (char *)malloc(sizeof(char) * 256);
    //flag to set testing mode (do not display images)
    int isTesting = 0;

    //Region capture input params
    sscanf(argv[1], "%s", image_name);
    sscanf(argv[2], "%i", &kernel_size);
    sscanf(argv[3], "%i", &threads);
    sscanf(argv[4], "%i", &blocks_per_grid);
    sscanf(argv[5], "%i", &isTesting);
    //End Region

    //variable to store the start time
    int start_s = clock();

    //Region read the input image from the img folder
    Mat input = imread(Str("img/") + image_name, 1);
    if (input.empty())
    {
        cout << "error: image not read from file\n\n";
        return(0);
    }
    //End Region

    //validate that the kernel size is odd
    if (kernel_size % 2 == 0)
    {
        cout << "error: arg 2 kernel size must be odd\n\n";
        return(0);
    }

    //validate that the number of threads is divisible by the number of blocks
    if (threads % blocks_per_grid != 0)
    {
        cout << Str(threads % blocks_per_grid) + " error: args 3 and 4 number of threads(3) must be divisible in blocks per grid(4)\n\n";
        return(0);
    }

    //width and height of the image
    int rows = input.rows;
    int cols = input.cols;

    //create Mat for the output image
    cv::Mat output(rows, cols, CV_64FC3);

    //launch the function that applies the blur
    make_blur(input, output);

    //if testing mode is disabled then show the images
    if(isTesting == 0)
    {
        namedWindow("Input", CV_WINDOW_NORMAL);
        namedWindow("Output", CV_WINDOW_NORMAL);
        cv::imshow("Input", input);
        cv::imshow("Output", output);
        cv::waitKey();
    }

    //variable to store the end time
    int stop_s = clock();

    //free image memory
    input.release();
    output.release();

    //print performance information
    cout << cols << "x";
    cout << rows << "\t";
    cout << threads << "\t";
    cout << blocks_per_grid << "\t";
    cout << kernel_size << "\t";
    cout << (stop_s-start_s)/double(CLOCKS_PER_SEC)*1000 << " ms" << endl;

    return 0;
}
2cc17ee4ecba201e00b0fe2d10014d3b2579f508.hip
// !!! This is a file automatically generated by hipify!!!
//---------------------------------------------------------------------------//
// Copyright (c) 2013-2014 Kyle Lutz <[email protected]>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//

#include <algorithm>
#include <cstdlib>
#include <iostream>

#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/reduce.h>

#include "perf.hpp"

int main(int argc, char *argv[])
{
    perf_parse_args(argc, argv);

    std::cout << "size: " << PERF_N << std::endl;
    thrust::host_vector<int> h_vec = generate_random_vector<int>(PERF_N);

    // transfer data to the device
    thrust::device_vector<int> d_vec = h_vec;

    int sum = 0;
    perf_timer t;
    for(size_t trial = 0; trial < PERF_TRIALS; trial++){
        t.start();
        sum = thrust::reduce(d_vec.begin(), d_vec.end());
        hipDeviceSynchronize();
        t.stop();
    }
    std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl;
    std::cout << "sum: " << sum << std::endl;

    return 0;
}
2cc17ee4ecba201e00b0fe2d10014d3b2579f508.cu
//---------------------------------------------------------------------------//
// Copyright (c) 2013-2014 Kyle Lutz <[email protected]>
//
// Distributed under the Boost Software License, Version 1.0
// See accompanying file LICENSE_1_0.txt or copy at
// http://www.boost.org/LICENSE_1_0.txt
//
// See http://boostorg.github.com/compute for more information.
//---------------------------------------------------------------------------//

#include <algorithm>
#include <cstdlib>
#include <iostream>

#include <thrust/copy.h>
#include <thrust/device_vector.h>
#include <thrust/generate.h>
#include <thrust/host_vector.h>
#include <thrust/reduce.h>

#include "perf.hpp"

int main(int argc, char *argv[])
{
    perf_parse_args(argc, argv);

    std::cout << "size: " << PERF_N << std::endl;
    thrust::host_vector<int> h_vec = generate_random_vector<int>(PERF_N);

    // transfer data to the device
    thrust::device_vector<int> d_vec = h_vec;

    int sum = 0;
    perf_timer t;
    for(size_t trial = 0; trial < PERF_TRIALS; trial++){
        t.start();
        sum = thrust::reduce(d_vec.begin(), d_vec.end());
        cudaDeviceSynchronize();
        t.stop();
    }
    std::cout << "time: " << t.min_time() / 1e6 << " ms" << std::endl;
    std::cout << "sum: " << sum << std::endl;

    return 0;
}
6531e2bca6d3c9bb35aff1bec0e256f6cc4c3909.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <iostream> #include <vector> #include <time.h> #include <math.h> #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of hipError_t error */ \ do { \ hipError_t error = condition; \ if (error != hipSuccess) { \ std::cout << hipGetErrorString(error) << std::endl; \ } \ } while (0) #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) const int block_num = 512; #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) const int threadsPerBlock = sizeof(unsigned long long) * 8; __global__ void three_nn_gpu(const int b, const int n, const int m, const float* xyz1, const float* xyz2, float* dist, int* idx){ // Find three nearest neighbors with square distance, from xyz1 to xyz2 // input: xyz1: (b, n, 3), xyz2: (b, m, 3) // output: dist: (b, n, 3), idx: (b, n, 3) int total_idx = b * n; CUDA_1D_KERNEL_LOOP(point_inds, total_idx){ int cur_batch_idx = point_inds / n; const float* cur_xyz1 = xyz1 + point_inds * 3; const float cur_xyz1_x = cur_xyz1[0]; const float cur_xyz1_y = cur_xyz1[1]; const float cur_xyz1_z = cur_xyz1[2]; float cur_xyz2_x, cur_xyz2_y, cur_xyz2_z; const float* cur_xyz2 = xyz2 + cur_batch_idx * m * 3; float* cur_dist = dist + point_inds * 3; int* cur_idx = idx + point_inds * 3; double best1 = 1e40; double best2 = 1e40; double best3 = 1e40; double d; int besti1 = 0; int besti2 = 0; int besti3 = 0; for (int i = 0; i < m; i++){ // compare the distance to each xyz2 points cur_xyz2_x = cur_xyz2[i * 3 + 0]; cur_xyz2_y = cur_xyz2[i * 3 + 1]; cur_xyz2_z = cur_xyz2[i * 3 + 2]; d = (cur_xyz2_x - cur_xyz1_x) * (cur_xyz2_x - cur_xyz1_x) + (cur_xyz2_y - cur_xyz1_y) * (cur_xyz2_y - cur_xyz1_y) + (cur_xyz2_z - cur_xyz1_z) * (cur_xyz2_z - cur_xyz1_z); if (d < best1){ best3=best2; besti3=besti2; best2=best1; besti2=besti1; best1=d; besti1=i; } else if (d < best2){ best3=best2; besti3=besti2; best2=d; besti2=i; } else if (d < best3){ best3=d; besti3=i; } } cur_dist[0] = best1; cur_dist[1] = best2; cur_dist[2] = best3; cur_idx[0] = besti1; cur_idx[1] = besti2; cur_idx[2] = besti3; } } __global__ void three_interpolate_gpu(const int b, const int m, const int c, const int n, const float* points, const int* idx, const float* weight, float* out){ // input: points: (b, m, c), idx: (b, n, 3), weight: (b, n, 3) // out: (b, n, c) int total_idx = b * n * c; CUDA_1D_KERNEL_LOOP(point_inds, total_idx){ int cur_batch_inds = point_inds / (n * c); int cur_point_inds = point_inds / c; int cur_channel_inds = point_inds % c; const float* cur_points = points + cur_batch_inds * m * c; const int* cur_idx = idx + cur_point_inds * 3; const float* cur_weight = weight + cur_point_inds * 3; float w1 = cur_weight[0]; float w2 = cur_weight[1]; float w3 = cur_weight[2]; int i1 = cur_idx[0]; int i2 = cur_idx[1]; int i3 = cur_idx[2]; float c1 = cur_points[i1 * c + cur_channel_inds]; float c2 = cur_points[i2 * c + cur_channel_inds]; float c3 = cur_points[i3 * c + cur_channel_inds]; out[point_inds] = c1 * w1 + c2 * w2 + c3 * w3; } } __global__ void three_interpolate_grad_gpu(const int b, const int n, const int c, const int m, const float* grad_out, const int* idx, const float* weight, float* grad_points){ // input: grad_out: [b, n, c] idx [b, n, 3], weight [b, n, 3] // output: grad_points [b, m, c] int total_idx = b * n * c; CUDA_1D_KERNEL_LOOP(points_inds, total_idx){ int cur_batch_inds = points_inds / (n * c); int cur_points_inds = 
points_inds / c; int cur_channel_inds = points_inds % c; float* cur_grad_points = grad_points + cur_batch_inds * m * c; const float* cur_grad_out = grad_out + points_inds; const int* cur_idx = idx + cur_points_inds * 3; const float* cur_weight = weight + cur_points_inds * 3; float w1 = cur_weight[0]; float w2 = cur_weight[1]; float w3 = cur_weight[2]; int i1 = cur_idx[0]; int i2 = cur_idx[1]; int i3 = cur_idx[2]; atomicAdd(&cur_grad_points[i1 * c + cur_channel_inds], cur_grad_out[0] * w1); atomicAdd(&cur_grad_points[i2 * c + cur_channel_inds], cur_grad_out[0] * w2); atomicAdd(&cur_grad_points[i3 * c + cur_channel_inds], cur_grad_out[0] * w3); } } __global__ void k_interpolate_gpu(const int b, const int m, const int c, const int n, const int k, const float* points, const int* idx, const float* weight, float* out){ // input: points: (b, m, c), idx: (b, n, k), weight: (b, n, k) // out: (b, n, c) int total_idx = b * n * c; CUDA_1D_KERNEL_LOOP(point_inds, total_idx){ int cur_batch_inds = point_inds / (n * c); int cur_point_inds = point_inds / c; int cur_channel_inds = point_inds % c; const float* cur_points = points + cur_batch_inds * m * c; const int* cur_idx = idx + cur_point_inds * k; const float* cur_weight = weight + cur_point_inds * k; float w, ci; int index; out[point_inds] = 0; for (int i=0; i < k; i++){ index = cur_idx[i]; w = cur_weight[i]; ci = cur_points[index * c + cur_channel_inds]; out[point_inds] += w * ci; } } } __global__ void k_interpolate_grad_gpu(const int b, const int n, const int c, const int m, const int k, const float* grad_out, const int* idx, const float* weight, float* grad_points){ // input: grad_out: [b, n, c] idx [b, n, k], weight [b, n, k] // output: grad_points [b, m, c] int total_idx = b * n * c; CUDA_1D_KERNEL_LOOP(points_inds, total_idx){ int cur_batch_inds = points_inds / (n * c); int cur_points_inds = points_inds / c; int cur_channel_inds = points_inds % c; float* cur_grad_points = grad_points + cur_batch_inds * m * c; const float* cur_grad_out = grad_out + points_inds; const int* cur_idx = idx + cur_points_inds * k; const float* cur_weight = weight + cur_points_inds * k; float w; int index; for (int i=0; i<k; i++){ w = cur_weight[i]; index = cur_idx[i]; atomicAdd(&cur_grad_points[index * c + cur_channel_inds], cur_grad_out[0] * w); } } } void ThreeNNLauncher(const int b, const int n, const int m, const float* xyz1, const float* xyz2, float* dist, int* idx){ //std::cout << "beginning forwarding" << std::endl; hipLaunchKernelGGL(( three_nn_gpu), dim3(block_num), dim3(threadsPerBlock), 0, 0, b, n, m, xyz1, xyz2, dist, idx); //std::cout << "Finishing forwarding" << std::endl; } void ThreeInterpolateLauncher(const int b, const int m, const int c, const int n, const float* points, const int* idx, const float* weight, float* out){ hipLaunchKernelGGL(( three_interpolate_gpu), dim3(block_num), dim3(threadsPerBlock), 0, 0, b, m, c, n, points, idx, weight, out); } void ThreeInterpolateGradLauncher(const int b, const int n, const int c, const int m, const float* grad_out, const int* idx, const float* weight, float* grad_points){ // grad_out: [b, n, c] // idx: [b, n, 3], weight: [b. 
n, 3], grad_points: [b, m, c] hipLaunchKernelGGL(( three_interpolate_grad_gpu), dim3(block_num), dim3(threadsPerBlock), 0, 0, b, n, c, m, grad_out, idx, weight, grad_points); } void KInterpolateLauncher(const int b, const int m, const int c, const int n, const int k, const float* points, const int* idx, const float* weight, float* out){ hipLaunchKernelGGL(( k_interpolate_gpu), dim3(block_num), dim3(threadsPerBlock), 0, 0, b, m, c, n, k, points, idx, weight, out); } void KInterpolateGradLauncher(const int b, const int n, const int c, const int m, const int k, const float* grad_out, const int* idx, const float* weight, float* grad_points){ // grad_out: [b, n, c] // idx: [b, n, 3], weight: [b. n, 3], grad_points: [b, m, c] hipLaunchKernelGGL(( k_interpolate_grad_gpu), dim3(block_num), dim3(threadsPerBlock), 0, 0, b, n, c, m, k, grad_out, idx, weight, grad_points); }
6531e2bca6d3c9bb35aff1bec0e256f6cc4c3909.cu
#include <stdio.h> #include <iostream> #include <vector> #include <time.h> #include <math.h> #define CUDA_CHECK(condition) \ /* Code block avoids redefinition of cudaError_t error */ \ do { \ cudaError_t error = condition; \ if (error != cudaSuccess) { \ std::cout << cudaGetErrorString(error) << std::endl; \ } \ } while (0) #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) const int block_num = 512; #define DIVUP(m,n) ((m) / (n) + ((m) % (n) > 0)) const int threadsPerBlock = sizeof(unsigned long long) * 8; __global__ void three_nn_gpu(const int b, const int n, const int m, const float* xyz1, const float* xyz2, float* dist, int* idx){ // Find three nearest neighbors with square distance, from xyz1 to xyz2 // input: xyz1: (b, n, 3), xyz2: (b, m, 3) // output: dist: (b, n, 3), idx: (b, n, 3) int total_idx = b * n; CUDA_1D_KERNEL_LOOP(point_inds, total_idx){ int cur_batch_idx = point_inds / n; const float* cur_xyz1 = xyz1 + point_inds * 3; const float cur_xyz1_x = cur_xyz1[0]; const float cur_xyz1_y = cur_xyz1[1]; const float cur_xyz1_z = cur_xyz1[2]; float cur_xyz2_x, cur_xyz2_y, cur_xyz2_z; const float* cur_xyz2 = xyz2 + cur_batch_idx * m * 3; float* cur_dist = dist + point_inds * 3; int* cur_idx = idx + point_inds * 3; double best1 = 1e40; double best2 = 1e40; double best3 = 1e40; double d; int besti1 = 0; int besti2 = 0; int besti3 = 0; for (int i = 0; i < m; i++){ // compare the distance to each xyz2 points cur_xyz2_x = cur_xyz2[i * 3 + 0]; cur_xyz2_y = cur_xyz2[i * 3 + 1]; cur_xyz2_z = cur_xyz2[i * 3 + 2]; d = (cur_xyz2_x - cur_xyz1_x) * (cur_xyz2_x - cur_xyz1_x) + (cur_xyz2_y - cur_xyz1_y) * (cur_xyz2_y - cur_xyz1_y) + (cur_xyz2_z - cur_xyz1_z) * (cur_xyz2_z - cur_xyz1_z); if (d < best1){ best3=best2; besti3=besti2; best2=best1; besti2=besti1; best1=d; besti1=i; } else if (d < best2){ best3=best2; besti3=besti2; best2=d; besti2=i; } else if (d < best3){ best3=d; besti3=i; } } cur_dist[0] = best1; cur_dist[1] = best2; cur_dist[2] = best3; cur_idx[0] = besti1; cur_idx[1] = besti2; cur_idx[2] = besti3; } } __global__ void three_interpolate_gpu(const int b, const int m, const int c, const int n, const float* points, const int* idx, const float* weight, float* out){ // input: points: (b, m, c), idx: (b, n, 3), weight: (b, n, 3) // out: (b, n, c) int total_idx = b * n * c; CUDA_1D_KERNEL_LOOP(point_inds, total_idx){ int cur_batch_inds = point_inds / (n * c); int cur_point_inds = point_inds / c; int cur_channel_inds = point_inds % c; const float* cur_points = points + cur_batch_inds * m * c; const int* cur_idx = idx + cur_point_inds * 3; const float* cur_weight = weight + cur_point_inds * 3; float w1 = cur_weight[0]; float w2 = cur_weight[1]; float w3 = cur_weight[2]; int i1 = cur_idx[0]; int i2 = cur_idx[1]; int i3 = cur_idx[2]; float c1 = cur_points[i1 * c + cur_channel_inds]; float c2 = cur_points[i2 * c + cur_channel_inds]; float c3 = cur_points[i3 * c + cur_channel_inds]; out[point_inds] = c1 * w1 + c2 * w2 + c3 * w3; } } __global__ void three_interpolate_grad_gpu(const int b, const int n, const int c, const int m, const float* grad_out, const int* idx, const float* weight, float* grad_points){ // input: grad_out: [b, n, c] idx [b, n, 3], weight [b, n, 3] // output: grad_points [b, m, c] int total_idx = b * n * c; CUDA_1D_KERNEL_LOOP(points_inds, total_idx){ int cur_batch_inds = points_inds / (n * c); int cur_points_inds = points_inds / c; int cur_channel_inds = points_inds % c; float* cur_grad_points = 
grad_points + cur_batch_inds * m * c; const float* cur_grad_out = grad_out + points_inds; const int* cur_idx = idx + cur_points_inds * 3; const float* cur_weight = weight + cur_points_inds * 3; float w1 = cur_weight[0]; float w2 = cur_weight[1]; float w3 = cur_weight[2]; int i1 = cur_idx[0]; int i2 = cur_idx[1]; int i3 = cur_idx[2]; atomicAdd(&cur_grad_points[i1 * c + cur_channel_inds], cur_grad_out[0] * w1); atomicAdd(&cur_grad_points[i2 * c + cur_channel_inds], cur_grad_out[0] * w2); atomicAdd(&cur_grad_points[i3 * c + cur_channel_inds], cur_grad_out[0] * w3); } } __global__ void k_interpolate_gpu(const int b, const int m, const int c, const int n, const int k, const float* points, const int* idx, const float* weight, float* out){ // input: points: (b, m, c), idx: (b, n, k), weight: (b, n, k) // out: (b, n, c) int total_idx = b * n * c; CUDA_1D_KERNEL_LOOP(point_inds, total_idx){ int cur_batch_inds = point_inds / (n * c); int cur_point_inds = point_inds / c; int cur_channel_inds = point_inds % c; const float* cur_points = points + cur_batch_inds * m * c; const int* cur_idx = idx + cur_point_inds * k; const float* cur_weight = weight + cur_point_inds * k; float w, ci; int index; out[point_inds] = 0; for (int i=0; i < k; i++){ index = cur_idx[i]; w = cur_weight[i]; ci = cur_points[index * c + cur_channel_inds]; out[point_inds] += w * ci; } } } __global__ void k_interpolate_grad_gpu(const int b, const int n, const int c, const int m, const int k, const float* grad_out, const int* idx, const float* weight, float* grad_points){ // input: grad_out: [b, n, c] idx [b, n, k], weight [b, n, k] // output: grad_points [b, m, c] int total_idx = b * n * c; CUDA_1D_KERNEL_LOOP(points_inds, total_idx){ int cur_batch_inds = points_inds / (n * c); int cur_points_inds = points_inds / c; int cur_channel_inds = points_inds % c; float* cur_grad_points = grad_points + cur_batch_inds * m * c; const float* cur_grad_out = grad_out + points_inds; const int* cur_idx = idx + cur_points_inds * k; const float* cur_weight = weight + cur_points_inds * k; float w; int index; for (int i=0; i<k; i++){ w = cur_weight[i]; index = cur_idx[i]; atomicAdd(&cur_grad_points[index * c + cur_channel_inds], cur_grad_out[0] * w); } } } void ThreeNNLauncher(const int b, const int n, const int m, const float* xyz1, const float* xyz2, float* dist, int* idx){ //std::cout << "beginning forwarding" << std::endl; three_nn_gpu<<<block_num, threadsPerBlock>>>(b, n, m, xyz1, xyz2, dist, idx); //std::cout << "Finishing forwarding" << std::endl; } void ThreeInterpolateLauncher(const int b, const int m, const int c, const int n, const float* points, const int* idx, const float* weight, float* out){ three_interpolate_gpu<<<block_num, threadsPerBlock>>>(b, m, c, n, points, idx, weight, out); } void ThreeInterpolateGradLauncher(const int b, const int n, const int c, const int m, const float* grad_out, const int* idx, const float* weight, float* grad_points){ // grad_out: [b, n, c] // idx: [b, n, 3], weight: [b. 
n, 3], grad_points: [b, m, c] three_interpolate_grad_gpu<<<block_num, threadsPerBlock>>>(b, n, c, m, grad_out, idx, weight, grad_points); } void KInterpolateLauncher(const int b, const int m, const int c, const int n, const int k, const float* points, const int* idx, const float* weight, float* out){ k_interpolate_gpu<<<block_num, threadsPerBlock>>>(b, m, c, n, k, points, idx, weight, out); } void KInterpolateGradLauncher(const int b, const int n, const int c, const int m, const int k, const float* grad_out, const int* idx, const float* weight, float* grad_points){ // grad_out: [b, n, c] // idx: [b, n, 3], weight: [b. n, 3], grad_points: [b, m, c] k_interpolate_grad_gpu<<<block_num, threadsPerBlock>>>(b, n, c, m, k, grad_out, idx, weight, grad_points); }
da94797114d7d4f7fbe395aca54a40ba7fb0a997.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include "sparse_ops.h" #define FULL_MASK 0xffffffff /** * Compute the maximum value of elements across the row in a sparse matrix. * * Threads per Block : (32, 32) */ __device__ __forceinline__ float reduce_sparse_matrix_32x32_row_max( const float* __restrict__ matrix, const short* __restrict__ sparse_blocks, uint start_block_ptr, uint end_block_ptr, uint offset_row ) { __shared__ float shared[32]; // Get values from strided sparse blocks and find the maximum value. float max = -1e5f; uint block_ptr = start_block_ptr + threadIdx.y; for (; block_ptr < end_block_ptr; block_ptr += blockDim.y) { short2 sparse_block = *((short2 *) sparse_blocks + block_ptr); max = fmaxf(max, matrix[sparse_block.x * TILE_32x32_SIZE + offset_row * TILE_32x32_WIDTH + threadIdx.x]); } // Reduce the values in each warp and get the maximum value among them. for (uint offset = 16; offset > 0; offset /= 2) max = fmaxf(max, __shfl_down_sync(FULL_MASK, max, offset)); max = __shfl_sync(FULL_MASK, max, 0); // The first threads of warps write the locally-reduced maximum values to // the shared memory for reducing in thread-level. if (threadIdx.x == 0) shared[threadIdx.y] = max; __syncthreads(); // The first warp in each block calculates the final maximum value from the // shared memory. if (threadIdx.y == 0) { max = shared[threadIdx.x]; for (uint offset = 16; offset > 0; offset /= 2) max = fmaxf(max, __shfl_down_sync(FULL_MASK, max, offset)); shared[threadIdx.x] = max; } __syncthreads(); return shared[0]; } /** * Compute the sum of elements across the row in a sparse matrix. * * Threads per Block : (32, 32) */ __device__ __forceinline__ float reduce_sparse_matrix_32x32_row_sum( const float* __restrict__ matrix, const short* __restrict__ sparse_blocks, uint start_block_ptr, uint end_block_ptr, uint offset_row ) { __shared__ float shared[32]; // Get values from strided sparse blocks and calculate the sum. float sum = 0.0f; uint block_ptr = start_block_ptr + threadIdx.y; for (; block_ptr < end_block_ptr; block_ptr += blockDim.y) { short2 sparse_block = *((short2 *) sparse_blocks + block_ptr); sum += matrix[sparse_block.x * TILE_32x32_SIZE + offset_row * TILE_32x32_WIDTH + threadIdx.x]; } // Reduce the values in each warp and compute local sum. for (uint offset = 16; offset > 0; offset /= 2) sum += __shfl_down_sync(FULL_MASK, sum, offset); sum = __shfl_sync(FULL_MASK, sum, 0); // The first threads of warps write the locally-reduced summed values to // the shared memory for reducing in thread-level. if (threadIdx.x == 0) shared[threadIdx.y] = sum; __syncthreads(); // The first warp in each block calculates the final sum from the shared // memory. if (threadIdx.y == 0) { sum = shared[threadIdx.x]; for (uint offset = 16; offset > 0; offset /= 2) sum += __shfl_down_sync(FULL_MASK, sum, offset); shared[threadIdx.x] = sum; } __syncthreads(); return shared[0]; } /** * Calculate a softmax probability from the sparse logits matrix. 
* * Blocks : (Batches, Total Rows) * Threads per Block : (32, 32) */ __global__ void sparse_softmax_32x32_forward_kernel( const float* __restrict__ matrix_x, float* __restrict__ matrix_y, const short* __restrict__ sparse_blocks, const int* __restrict__ sparse_table, uint total_blocks ) { uint offset_row = blockIdx.y % TILE_32x32_WIDTH; uint start_block_ptr = sparse_table[blockIdx.y / TILE_32x32_WIDTH]; uint end_block_ptr = sparse_table[blockIdx.y / TILE_32x32_WIDTH + 1]; // Move to the current batch. matrix_x += blockIdx.x * total_blocks * TILE_32x32_SIZE; matrix_y += blockIdx.x * total_blocks * TILE_32x32_SIZE; // Get maximum value across the corresponding row and calculate stable // exponential by subtracting maximum logit to each one. float max = reduce_sparse_matrix_32x32_row_max( matrix_x, sparse_blocks, start_block_ptr, end_block_ptr, offset_row); uint block_ptr = start_block_ptr + threadIdx.y; for (; block_ptr < end_block_ptr; block_ptr += blockDim.y) { short2 sparse_block = *((short2 *) sparse_blocks + block_ptr); uint idx = sparse_block.x * TILE_32x32_SIZE + offset_row * TILE_32x32_WIDTH + threadIdx.x; matrix_y[idx] = __expf(matrix_x[idx] - max); } // Get total sum of exponential values and divide each value by the sum for // probability property (sum of probabilities is 1). float sum = reduce_sparse_matrix_32x32_row_sum( matrix_y, sparse_blocks, start_block_ptr, end_block_ptr, offset_row); block_ptr = start_block_ptr + threadIdx.y; for (; block_ptr < end_block_ptr; block_ptr += blockDim.y) { short2 sparse_block = *((short2 *) sparse_blocks + block_ptr); uint idx = sparse_block.x * TILE_32x32_SIZE + offset_row * TILE_32x32_WIDTH + threadIdx.x; matrix_y[idx] /= sum; } } /** * Calculate a gradient of input sparse logits matrix from the gradient of * softmax probability matrix. * * Blocks : (Batches, Total Rows) * Threads per Block : (32, 32) */ __global__ void sparse_softmax_32x32_backward_kernel( const float* __restrict__ matrix_y, const float* __restrict__ matrix_dy, float* __restrict__ matrix_dx, const short* __restrict__ sparse_blocks, const int* __restrict__ sparse_table, uint total_blocks ) { uint offset_row = blockIdx.y % TILE_32x32_WIDTH; uint start_block_ptr = sparse_table[blockIdx.y / TILE_32x32_WIDTH]; uint end_block_ptr = sparse_table[blockIdx.y / TILE_32x32_WIDTH + 1]; // Move to the current batch. matrix_y += blockIdx.x * total_blocks * TILE_32x32_SIZE; matrix_dy += blockIdx.x * total_blocks * TILE_32x32_SIZE; matrix_dx += blockIdx.x * total_blocks * TILE_32x32_SIZE; // Calculate the multiplication of the softmax probability and its gradient // and get sum of them to compute a gradient of softmax input. uint block_ptr = start_block_ptr + threadIdx.y; for (; block_ptr < end_block_ptr; block_ptr += blockDim.y) { short2 sparse_block = *((short2 *) sparse_blocks + block_ptr); uint idx = sparse_block.x * TILE_32x32_SIZE + offset_row * TILE_32x32_WIDTH + threadIdx.x; matrix_dx[idx] = matrix_dy[idx] * matrix_y[idx]; } float sum = reduce_sparse_matrix_32x32_row_sum( matrix_dx, sparse_blocks, start_block_ptr, end_block_ptr, offset_row); // Compute the gradient of softmax input. 
block_ptr = start_block_ptr + threadIdx.y; for (; block_ptr < end_block_ptr; block_ptr += blockDim.y) { short2 sparse_block = *((short2 *) sparse_blocks + block_ptr); uint idx = sparse_block.x * TILE_32x32_SIZE + offset_row * TILE_32x32_WIDTH + threadIdx.x; matrix_dx[idx] = (matrix_dy[idx] - sum) * matrix_y[idx]; } } torch::Tensor sparse_softmax_forward(torch::Tensor x, torch::Tensor row_blocks, torch::Tensor row_table) { auto output_shape = x.sizes(); // Merge all batch dimensions to single one and create empty output tensor. x = x.flatten(0, -4); auto y = torch::empty_like(x); // Get the dimension sizes. int64_t total_batches = x.size(0); int64_t total_blocks = row_blocks.size(0) / 2; int64_t total_rows = (row_table.size(0) - 1) * TILE_32x32_WIDTH; dim3 blocks(total_batches, total_rows); dim3 threadsPerBlock(TILE_32x32_WIDTH, TILE_32x32_WIDTH); hipLaunchKernelGGL(( sparse_softmax_32x32_forward_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, x.data_ptr<float>(), y.data_ptr<float>(), row_blocks.data_ptr<short>(), row_table.data_ptr<int>(), total_blocks ); return y.reshape(output_shape); } torch::Tensor sparse_softmax_backward(torch::Tensor y, torch::Tensor dy, torch::Tensor row_blocks, torch::Tensor row_table) { auto output_shape = y.sizes(); // Merge all batch dimensions to single one and create empty output tensor. y = y.flatten(0, -4); dy = dy.flatten(0, -4); auto dx = torch::empty_like(y); // Get the dimension sizes. int64_t total_batches = y.size(0); int64_t total_blocks = row_blocks.size(0) / 2; int64_t total_rows = (row_table.size(0) - 1) * TILE_32x32_WIDTH; dim3 blocks(total_batches, total_rows); dim3 threadsPerBlock(TILE_32x32_WIDTH, TILE_32x32_WIDTH); hipLaunchKernelGGL(( sparse_softmax_32x32_backward_kernel), dim3(blocks), dim3(threadsPerBlock), 0, 0, y.data_ptr<float>(), dy.data_ptr<float>(), dx.data_ptr<float>(), row_blocks.data_ptr<short>(), row_table.data_ptr<int>(), total_blocks ); return dx.reshape(output_shape); }
da94797114d7d4f7fbe395aca54a40ba7fb0a997.cu
#include <cuda.h> #include <cuda_runtime.h> #include "sparse_ops.h" #define FULL_MASK 0xffffffff /** * Compute the maximum value of elements across the row in a sparse matrix. * * Threads per Block : (32, 32) */ __device__ __forceinline__ float reduce_sparse_matrix_32x32_row_max( const float* __restrict__ matrix, const short* __restrict__ sparse_blocks, uint start_block_ptr, uint end_block_ptr, uint offset_row ) { __shared__ float shared[32]; // Get values from strided sparse blocks and find the maximum value. float max = -1e5f; uint block_ptr = start_block_ptr + threadIdx.y; for (; block_ptr < end_block_ptr; block_ptr += blockDim.y) { short2 sparse_block = *((short2 *) sparse_blocks + block_ptr); max = fmaxf(max, matrix[sparse_block.x * TILE_32x32_SIZE + offset_row * TILE_32x32_WIDTH + threadIdx.x]); } // Reduce the values in each warp and get the maximum value among them. for (uint offset = 16; offset > 0; offset /= 2) max = fmaxf(max, __shfl_down_sync(FULL_MASK, max, offset)); max = __shfl_sync(FULL_MASK, max, 0); // The first threads of warps write the locally-reduced maximum values to // the shared memory for reducing in thread-level. if (threadIdx.x == 0) shared[threadIdx.y] = max; __syncthreads(); // The first warp in each block calculates the final maximum value from the // shared memory. if (threadIdx.y == 0) { max = shared[threadIdx.x]; for (uint offset = 16; offset > 0; offset /= 2) max = fmaxf(max, __shfl_down_sync(FULL_MASK, max, offset)); shared[threadIdx.x] = max; } __syncthreads(); return shared[0]; } /** * Compute the sum of elements across the row in a sparse matrix. * * Threads per Block : (32, 32) */ __device__ __forceinline__ float reduce_sparse_matrix_32x32_row_sum( const float* __restrict__ matrix, const short* __restrict__ sparse_blocks, uint start_block_ptr, uint end_block_ptr, uint offset_row ) { __shared__ float shared[32]; // Get values from strided sparse blocks and calculate the sum. float sum = 0.0f; uint block_ptr = start_block_ptr + threadIdx.y; for (; block_ptr < end_block_ptr; block_ptr += blockDim.y) { short2 sparse_block = *((short2 *) sparse_blocks + block_ptr); sum += matrix[sparse_block.x * TILE_32x32_SIZE + offset_row * TILE_32x32_WIDTH + threadIdx.x]; } // Reduce the values in each warp and compute local sum. for (uint offset = 16; offset > 0; offset /= 2) sum += __shfl_down_sync(FULL_MASK, sum, offset); sum = __shfl_sync(FULL_MASK, sum, 0); // The first threads of warps write the locally-reduced summed values to // the shared memory for reducing in thread-level. if (threadIdx.x == 0) shared[threadIdx.y] = sum; __syncthreads(); // The first warp in each block calculates the final sum from the shared // memory. if (threadIdx.y == 0) { sum = shared[threadIdx.x]; for (uint offset = 16; offset > 0; offset /= 2) sum += __shfl_down_sync(FULL_MASK, sum, offset); shared[threadIdx.x] = sum; } __syncthreads(); return shared[0]; } /** * Calculate a softmax probability from the sparse logits matrix. * * Blocks : (Batches, Total Rows) * Threads per Block : (32, 32) */ __global__ void sparse_softmax_32x32_forward_kernel( const float* __restrict__ matrix_x, float* __restrict__ matrix_y, const short* __restrict__ sparse_blocks, const int* __restrict__ sparse_table, uint total_blocks ) { uint offset_row = blockIdx.y % TILE_32x32_WIDTH; uint start_block_ptr = sparse_table[blockIdx.y / TILE_32x32_WIDTH]; uint end_block_ptr = sparse_table[blockIdx.y / TILE_32x32_WIDTH + 1]; // Move to the current batch. 
matrix_x += blockIdx.x * total_blocks * TILE_32x32_SIZE; matrix_y += blockIdx.x * total_blocks * TILE_32x32_SIZE; // Get maximum value across the corresponding row and calculate stable // exponential by subtracting maximum logit to each one. float max = reduce_sparse_matrix_32x32_row_max( matrix_x, sparse_blocks, start_block_ptr, end_block_ptr, offset_row); uint block_ptr = start_block_ptr + threadIdx.y; for (; block_ptr < end_block_ptr; block_ptr += blockDim.y) { short2 sparse_block = *((short2 *) sparse_blocks + block_ptr); uint idx = sparse_block.x * TILE_32x32_SIZE + offset_row * TILE_32x32_WIDTH + threadIdx.x; matrix_y[idx] = __expf(matrix_x[idx] - max); } // Get total sum of exponential values and divide each value by the sum for // probability property (sum of probabilities is 1). float sum = reduce_sparse_matrix_32x32_row_sum( matrix_y, sparse_blocks, start_block_ptr, end_block_ptr, offset_row); block_ptr = start_block_ptr + threadIdx.y; for (; block_ptr < end_block_ptr; block_ptr += blockDim.y) { short2 sparse_block = *((short2 *) sparse_blocks + block_ptr); uint idx = sparse_block.x * TILE_32x32_SIZE + offset_row * TILE_32x32_WIDTH + threadIdx.x; matrix_y[idx] /= sum; } } /** * Calculate a gradient of input sparse logits matrix from the gradient of * softmax probability matrix. * * Blocks : (Batches, Total Rows) * Threads per Block : (32, 32) */ __global__ void sparse_softmax_32x32_backward_kernel( const float* __restrict__ matrix_y, const float* __restrict__ matrix_dy, float* __restrict__ matrix_dx, const short* __restrict__ sparse_blocks, const int* __restrict__ sparse_table, uint total_blocks ) { uint offset_row = blockIdx.y % TILE_32x32_WIDTH; uint start_block_ptr = sparse_table[blockIdx.y / TILE_32x32_WIDTH]; uint end_block_ptr = sparse_table[blockIdx.y / TILE_32x32_WIDTH + 1]; // Move to the current batch. matrix_y += blockIdx.x * total_blocks * TILE_32x32_SIZE; matrix_dy += blockIdx.x * total_blocks * TILE_32x32_SIZE; matrix_dx += blockIdx.x * total_blocks * TILE_32x32_SIZE; // Calculate the multiplication of the softmax probability and its gradient // and get sum of them to compute a gradient of softmax input. uint block_ptr = start_block_ptr + threadIdx.y; for (; block_ptr < end_block_ptr; block_ptr += blockDim.y) { short2 sparse_block = *((short2 *) sparse_blocks + block_ptr); uint idx = sparse_block.x * TILE_32x32_SIZE + offset_row * TILE_32x32_WIDTH + threadIdx.x; matrix_dx[idx] = matrix_dy[idx] * matrix_y[idx]; } float sum = reduce_sparse_matrix_32x32_row_sum( matrix_dx, sparse_blocks, start_block_ptr, end_block_ptr, offset_row); // Compute the gradient of softmax input. block_ptr = start_block_ptr + threadIdx.y; for (; block_ptr < end_block_ptr; block_ptr += blockDim.y) { short2 sparse_block = *((short2 *) sparse_blocks + block_ptr); uint idx = sparse_block.x * TILE_32x32_SIZE + offset_row * TILE_32x32_WIDTH + threadIdx.x; matrix_dx[idx] = (matrix_dy[idx] - sum) * matrix_y[idx]; } } torch::Tensor sparse_softmax_forward(torch::Tensor x, torch::Tensor row_blocks, torch::Tensor row_table) { auto output_shape = x.sizes(); // Merge all batch dimensions to single one and create empty output tensor. x = x.flatten(0, -4); auto y = torch::empty_like(x); // Get the dimension sizes. 
int64_t total_batches = x.size(0); int64_t total_blocks = row_blocks.size(0) / 2; int64_t total_rows = (row_table.size(0) - 1) * TILE_32x32_WIDTH; dim3 blocks(total_batches, total_rows); dim3 threadsPerBlock(TILE_32x32_WIDTH, TILE_32x32_WIDTH); sparse_softmax_32x32_forward_kernel<<<blocks, threadsPerBlock>>>( x.data_ptr<float>(), y.data_ptr<float>(), row_blocks.data_ptr<short>(), row_table.data_ptr<int>(), total_blocks ); return y.reshape(output_shape); } torch::Tensor sparse_softmax_backward(torch::Tensor y, torch::Tensor dy, torch::Tensor row_blocks, torch::Tensor row_table) { auto output_shape = y.sizes(); // Merge all batch dimensions to single one and create empty output tensor. y = y.flatten(0, -4); dy = dy.flatten(0, -4); auto dx = torch::empty_like(y); // Get the dimension sizes. int64_t total_batches = y.size(0); int64_t total_blocks = row_blocks.size(0) / 2; int64_t total_rows = (row_table.size(0) - 1) * TILE_32x32_WIDTH; dim3 blocks(total_batches, total_rows); dim3 threadsPerBlock(TILE_32x32_WIDTH, TILE_32x32_WIDTH); sparse_softmax_32x32_backward_kernel<<<blocks, threadsPerBlock>>>( y.data_ptr<float>(), dy.data_ptr<float>(), dx.data_ptr<float>(), row_blocks.data_ptr<short>(), row_table.data_ptr<int>(), total_blocks ); return dx.reshape(output_shape); }
eaa52b46b2772d246f8fee3f28642e65b3075396.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
/*
    -- MAGMA (version 2.5.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date January 2019

       @precisions mixed zc -> ds

*/
#include "magmasparse_internal.h"

#define BLOCK_SIZE 256


// CSR-SpMV kernel
__global__ void
zcgecsrmv_mixed_prec_kernel(
    int num_rows,
    int num_cols,
    magmaDoubleComplex alpha,
    magmaDoubleComplex * ddiagval,
    magmaFloatComplex * doffdiagval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    magmaDoubleComplex * dx,
    magmaDoubleComplex beta,
    magmaDoubleComplex * dy)
{
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int j;

    if(row<num_rows){
        magmaDoubleComplex dot = ddiagval[ row ] * dx[ row ];
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        for( j=start; j<end; j++){
            magmaDoubleComplex val =
                MAGMA_Z_MAKE( (double) MAGMA_C_REAL(doffdiagval[ j ]),
                              (double) MAGMA_C_IMAG(doffdiagval[ j ]) );
            dot += val * dx[ dcolind[j] ];
        }
        dy[ row ] = dot *alpha + beta * dy[ row ];
    }
}

/**
    Purpose
    -------

    This routine computes y = alpha * A * x + beta * y on the GPU.
    A is a matrix in mixed precision, i.e. the diagonal values are stored in
    high precision, the offdiagonal values in low precision.
    The input format is a CSR (val, row, col) in FloatComplex storing all
    offdiagonal elements and an array containing the diagonal values in
    DoubleComplex.

    Arguments
    ---------

    @param[in]
    transA      magma_trans_t
                transposition parameter for A

    @param[in]
    m           magma_int_t
                number of rows in A

    @param[in]
    n           magma_int_t
                number of columns in A

    @param[in]
    alpha       magmaDoubleComplex
                scalar multiplier

    @param[in]
    ddiagval    magmaDoubleComplex_ptr
                array containing diagonal values of A in DoubleComplex

    @param[in]
    doffdiagval magmaFloatComplex_ptr
                array containing offdiag values of A in CSR

    @param[in]
    drowptr     magmaIndex_ptr
                rowpointer of A in CSR

    @param[in]
    dcolind     magmaIndex_ptr
                columnindices of A in CSR

    @param[in]
    dx          magmaDoubleComplex_ptr
                input vector x

    @param[in]
    beta        magmaDoubleComplex
                scalar multiplier

    @param[out]
    dy          magmaDoubleComplex_ptr
                input/output vector y

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_zblas
    ********************************************************************/

extern "C" magma_int_t
magma_zcgecsrmv_mixed_prec(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magmaDoubleComplex alpha,
    magmaDoubleComplex_ptr ddiagval,
    magmaFloatComplex_ptr doffdiagval,
    magmaIndex_ptr drowptr,
    magmaIndex_ptr dcolind,
    magmaDoubleComplex_ptr dx,
    magmaDoubleComplex beta,
    magmaDoubleComplex_ptr dy,
    magma_queue_t queue )
{
    dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
    magma_int_t threads = BLOCK_SIZE;
    hipLaunchKernelGGL(( zcgecsrmv_mixed_prec_kernel), dim3(grid), dim3(threads), 0, queue->cuda_stream() ,
        m, n, alpha, ddiagval, doffdiagval, drowptr, dcolind, dx, beta, dy);

    return MAGMA_SUCCESS;
}
eaa52b46b2772d246f8fee3f28642e65b3075396.cu
/*
    -- MAGMA (version 2.5.0) --
       Univ. of Tennessee, Knoxville
       Univ. of California, Berkeley
       Univ. of Colorado, Denver
       @date January 2019

       @precisions mixed zc -> ds

*/
#include "magmasparse_internal.h"

#define BLOCK_SIZE 256


// CSR-SpMV kernel
__global__ void
zcgecsrmv_mixed_prec_kernel(
    int num_rows,
    int num_cols,
    magmaDoubleComplex alpha,
    magmaDoubleComplex * ddiagval,
    magmaFloatComplex * doffdiagval,
    magma_index_t * drowptr,
    magma_index_t * dcolind,
    magmaDoubleComplex * dx,
    magmaDoubleComplex beta,
    magmaDoubleComplex * dy)
{
    int row = blockIdx.x*blockDim.x+threadIdx.x;
    int j;

    if(row<num_rows){
        magmaDoubleComplex dot = ddiagval[ row ] * dx[ row ];
        int start = drowptr[ row ];
        int end = drowptr[ row+1 ];
        for( j=start; j<end; j++){
            magmaDoubleComplex val =
                MAGMA_Z_MAKE( (double) MAGMA_C_REAL(doffdiagval[ j ]),
                              (double) MAGMA_C_IMAG(doffdiagval[ j ]) );
            dot += val * dx[ dcolind[j] ];
        }
        dy[ row ] = dot *alpha + beta * dy[ row ];
    }
}

/**
    Purpose
    -------

    This routine computes y = alpha * A * x + beta * y on the GPU.
    A is a matrix in mixed precision, i.e. the diagonal values are stored in
    high precision, the offdiagonal values in low precision.
    The input format is a CSR (val, row, col) in FloatComplex storing all
    offdiagonal elements and an array containing the diagonal values in
    DoubleComplex.

    Arguments
    ---------

    @param[in]
    transA      magma_trans_t
                transposition parameter for A

    @param[in]
    m           magma_int_t
                number of rows in A

    @param[in]
    n           magma_int_t
                number of columns in A

    @param[in]
    alpha       magmaDoubleComplex
                scalar multiplier

    @param[in]
    ddiagval    magmaDoubleComplex_ptr
                array containing diagonal values of A in DoubleComplex

    @param[in]
    doffdiagval magmaFloatComplex_ptr
                array containing offdiag values of A in CSR

    @param[in]
    drowptr     magmaIndex_ptr
                rowpointer of A in CSR

    @param[in]
    dcolind     magmaIndex_ptr
                columnindices of A in CSR

    @param[in]
    dx          magmaDoubleComplex_ptr
                input vector x

    @param[in]
    beta        magmaDoubleComplex
                scalar multiplier

    @param[out]
    dy          magmaDoubleComplex_ptr
                input/output vector y

    @param[in]
    queue       magma_queue_t
                Queue to execute in.

    @ingroup magmasparse_zblas
    ********************************************************************/

extern "C" magma_int_t
magma_zcgecsrmv_mixed_prec(
    magma_trans_t transA,
    magma_int_t m, magma_int_t n,
    magmaDoubleComplex alpha,
    magmaDoubleComplex_ptr ddiagval,
    magmaFloatComplex_ptr doffdiagval,
    magmaIndex_ptr drowptr,
    magmaIndex_ptr dcolind,
    magmaDoubleComplex_ptr dx,
    magmaDoubleComplex beta,
    magmaDoubleComplex_ptr dy,
    magma_queue_t queue )
{
    dim3 grid( magma_ceildiv( m, BLOCK_SIZE ) );
    magma_int_t threads = BLOCK_SIZE;
    zcgecsrmv_mixed_prec_kernel<<< grid, threads, 0, queue->cuda_stream() >>>
        (m, n, alpha, ddiagval, doffdiagval, drowptr, dcolind, dx, beta, dy);

    return MAGMA_SUCCESS;
}
b33541d6c5ac4f651a8f4962c7d9bb3690ee890d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Groute: An Asynchronous Multi-GPU Programming Framework // http://www.github.com/groute/groute // Copyright (c) 2017, A. Barak // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the names of the copyright holders nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #include <vector> #include <algorithm> #include <thread> #include <memory> #include <random> #include <gflags/gflags.h> #include <groute/device/cta_scheduler.cuh> #include <groute/graphs/csr_graph.h> #include <groute/dwl/distributed_worklist.cuh> #include <groute/dwl/workers.cuh> #include <utils/graphs/traversal.h> #include <device_launch_parameters.h> #include "sssp_common.h" DECLARE_int32(source_node); const distance_t INF = UINT_MAX; namespace sssp { struct LocalData { index_t m_node; uint32_t m_iterated_round; __device__ __host__ __forceinline__ LocalData(uint32_t iterated_round, index_t node) : m_iterated_round(iterated_round), m_node(node) { } }; struct DistanceData { index_t node; distance_t distance; uint32_t iterated_round; uint32_t padding; __device__ __host__ __forceinline__ DistanceData(uint32_t iterated_round, index_t node, distance_t distance) : iterated_round(iterated_round), node(node), distance(distance) {} __device__ __host__ __forceinline__ DistanceData() : iterated_round(0), node(INF), distance(INF) { printf("call DistanceData default constructor\n"); } }; typedef LocalData local_work_t; typedef DistanceData remote_work_t; __global__ void SSSPMemsetKernel(index_t source_node, distance_t *distances, distance_t *delta, distance_t *last_delta, int nnodes) { int tid = TID_1D; if (tid < nnodes) { distances[tid] = INF; delta[tid] = INF; last_delta[tid] = INF; // if (tid == source_node) { // delta[tid] = 0; // last_delta[tid] = 0; // } } } template<bool CTAScheduling = true> /// SSSP work with Collective Thread Array scheduling for exploiting nested parallelism struct SSSPWork { template< typename WorkSource, typename WorkTarget, typename TGraph, typename TWeightDatum, typename TDistanceDatum> __device__ static void work( const uint32_t iterated_round, const WorkSource 
&work_source, WorkTarget &work_target, const TGraph &graph, TWeightDatum &edge_weights, TDistanceDatum &node_distances, TDistanceDatum &node_delta, TDistanceDatum &node_last_delta) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.get_size(); uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) { groute::dev::np_local<distance_t> np_local = {0, 0, 0}; if (i < work_size) { index_t node = work_source.get_work(i).m_node; distance_t old_value = node_distances[node]; printf("abc: iter:%u %u %u %u\n", iterated_round, node_distances[node], node_delta[node], node_last_delta[node]); distance_t old_delta; if (iterated_round % 2) old_delta = atomicExch(node_delta.get_item_ptr(node), INF); else old_delta = atomicExch(node_last_delta.get_item_ptr(node), INF); distance_t new_value = min(old_value, old_delta); printf("iter:%d node %d old_delta %d\n", iterated_round, node, old_delta); printf("old value:%d new value:%d\n", old_value, new_value); if (new_value != old_value) { printf("node: %d old_delta:%d\n", node, old_delta); node_distances[node] = new_value; np_local.start = graph.begin_edge(node); np_local.size = graph.end_edge(node) - np_local.start; np_local.meta_data = old_delta; } } groute::dev::CTAWorkScheduler<distance_t>::template schedule( np_local, [&iterated_round, &work_target, &graph, &edge_weights, &node_last_delta, &node_delta]( index_t edge, index_t size, distance_t old_delta) { index_t dest = graph.edge_dest(edge); distance_t weight = edge_weights.get_item(edge); distance_t new_delta = old_delta + weight; if (iterated_round % 2) { if (new_delta < atomicMin(node_last_delta.get_item_ptr(dest), new_delta)) work_target.append_work(DistanceData(iterated_round, dest, new_delta)); } else { if (new_delta < atomicMin(node_delta.get_item_ptr(dest), new_delta)) work_target.append_work(DistanceData(iterated_round, dest, new_delta)); } } ); } } }; template<> /// SSSP work without CTA support struct SSSPWork<false> { template< typename WorkSource, typename WorkTarget, typename TGraph, typename TWeightDatum, typename TDistanceDatum> __device__ static void work( const uint32_t iterated_round, const WorkSource &work_source, WorkTarget &work_target, const TGraph &graph, TWeightDatum &edge_weights, TDistanceDatum &node_distances, TDistanceDatum &node_delta, TDistanceDatum &node_last_delta ) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.get_size(); for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { index_t node = work_source.get_work(i).m_node; distance_t old_value = node_distances.get_item(node); distance_t old_delta; if (iterated_round % 2) old_delta = atomicExch(node_delta.get_item_ptr(node), INF); else old_delta = atomicExch(node_last_delta.get_item_ptr(node), INF); distance_t new_value = min(old_value, old_delta); if (new_value != old_value) { for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); distance_t weight = edge_weights.get_item(edge); distance_t new_delta = old_delta + weight; if (iterated_round % 2) { if (new_delta < atomicMin(node_last_delta.get_item_ptr(node), new_delta)) { work_target.append_work(DistanceData(iterated_round, dest, new_delta)); } } else { if (new_delta < atomicMin(node_delta.get_item_ptr(node), new_delta)) { 
work_target.append_work(DistanceData(iterated_round, dest, new_delta)); } } } } } } }; //DWCallbacks instance per GPU struct DWCallbacks { private: groute::graphs::dev::CSRGraphSeg m_graph_seg; groute::graphs::dev::GraphDatum<distance_t> m_distances_datum; groute::graphs::dev::GraphDatum<distance_t> m_delta_datum; groute::graphs::dev::GraphDatum<distance_t> m_last_delta_datum; public: template<typename...UnusedData> DWCallbacks( const groute::graphs::dev::CSRGraphSeg &graph_seg, const groute::graphs::dev::GraphDatumSeg<distance_t> &weights_datum, const groute::graphs::dev::GraphDatum<distance_t> &distances_datum, const groute::graphs::dev::GraphDatum<distance_t> &delta_datum, const groute::graphs::dev::GraphDatum<distance_t> &last_datum, UnusedData &... data) : m_graph_seg(graph_seg), m_distances_datum(distances_datum), m_delta_datum(delta_datum), m_last_delta_datum(last_datum) { } DWCallbacks() {} __device__ __forceinline__ groute::SplitFlags on_receive(const remote_work_t &work) { printf("call on_receive %d,%d,%d\n", work.iterated_round, work.node, work.distance); if (m_graph_seg.owns(work.node)) { // return groute::SF_Take; if (work.iterated_round % 2) { return (work.distance < atomicMin(m_delta_datum.get_item_ptr(work.node), work.distance)) ? groute::SF_Take : groute::SF_None; // Filter } else { return (work.distance < atomicMin(m_last_delta_datum.get_item_ptr(work.node), work.distance)) ? groute::SF_Take : groute::SF_None; // Filter } } return groute::SF_Pass; } __device__ __forceinline__ bool should_defer(const local_work_t &work, const distance_t &global_threshold) { printf("iter:%u delta:%u\n", work.m_iterated_round, work.m_node); bool defer; if (work.m_iterated_round % 2) defer = m_delta_datum[work.m_node] > global_threshold; else defer = m_last_delta_datum[work.m_node] > global_threshold; if (defer) printf("defer\n"); return defer; } __device__ __forceinline__ groute::SplitFlags on_send(local_work_t work) { printf("call on_send\n"); return (m_graph_seg.owns(work.m_node)) ? 
groute::SF_Take : groute::SF_Pass; } __device__ __forceinline__ remote_work_t pack(local_work_t work) { printf("iterated round:%d\n", work.m_iterated_round); return DistanceData(work.m_iterated_round, work.m_node, m_distances_datum.get_item(work.m_node)); } __device__ __forceinline__ local_work_t unpack(const remote_work_t &work) { return LocalData(work.iterated_round, work.node); } }; struct Algo { static const char *NameLower() { return "sssp"; } static const char *Name() { return "SSSP"; } static void HostInit( utils::traversal::Context<sssp::Algo> &context, groute::graphs::multi::CSRGraphAllocator &graph_manager, groute::IDistributedWorklist<local_work_t, remote_work_t> &distributed_worklist) { // Get a valid source_node from flag index_t source_node = min(max((index_t) 0, (index_t) FLAGS_source_node), context.host_graph.nnodes - 1); // Map to the (possibly new) partitioned vertex space source_node = graph_manager.GetGraphPartitioner()->ReverseLookup(source_node); assert(source_node == 0); // Host endpoint for sending initial work groute::Endpoint host = groute::Endpoint::HostEndpoint(0); // Report the initial work distributed_worklist.ReportInitialWork(1, host); std::vector<remote_work_t> initial_work; initial_work.push_back(remote_work_t(1, source_node, 0)); distributed_worklist .GetLink(host) .Send(groute::Segment<remote_work_t>(&initial_work[0], 1), groute::Event()); printf("Host Init finished\n"); } template<typename TGraph, typename TWeightDatum, typename TDistanceDatum, typename...UnusedData> static void DeviceMemset(groute::Stream &stream, TGraph &graph, TWeightDatum &weights_datum, TDistanceDatum &distances_datum, TDistanceDatum &delta_datum, TDistanceDatum &last_datum, const UnusedData &... data) { dim3 grid_dims, block_dims; KernelSizing(grid_dims, block_dims, distances_datum.size); index_t s_node = 0; SSSPMemsetKernel << < grid_dims, block_dims, 0, stream.cuda_stream >> > ( s_node, distances_datum.data_ptr, delta_datum.data_ptr, last_datum.data_ptr, distances_datum.size); } template<typename TGraph, typename TWeightDatum, typename TDistanceDatum, typename...UnusedData> static void DeviceInit( groute::Endpoint endpoint, groute::Stream &stream, groute::IDistributedWorklist<local_work_t, remote_work_t> &distributed_worklist, groute::IDistributedWorklistPeer<local_work_t, remote_work_t, DWCallbacks> *peer, TGraph &graph, TWeightDatum &weights_datum, TDistanceDatum &distances_datum, TDistanceDatum &delta_datum, TDistanceDatum &last_datum, const UnusedData &... data) { } template< typename TGraphAllocator, typename TWeightDatum, typename TDistanceDatum, typename...UnusedData> static const std::vector<distance_t> & Gather(TGraphAllocator &graph_allocator, TWeightDatum &weights_datum, TDistanceDatum &distances_datum, TDistanceDatum &delta_datum, TDistanceDatum &last_delta_datum, UnusedData &... data) { //graph_allocator.GatherDatum(distances_datum); //graph_allocator.GatherDatum(delta_datum); return distances_datum.GetHostData(); } template< typename TWeightDatum, typename TDistanceDatum, typename...UnusedData> static std::vector<distance_t> Host(groute::graphs::host::CSRGraph &graph, TWeightDatum &weights_datum, TDistanceDatum &distances_datum, TDistanceDatum &delta_datum, TDistanceDatum &last_delta_datum, UnusedData &... 
data) { return SSSPHostNaive(graph, weights_datum.GetHostDataPtr(), min(max((index_t) 0, (index_t) FLAGS_source_node), graph.nnodes - 1)); } static int Output(const char *file, const std::vector<distance_t> &distances) { return SSSPOutput(file, distances); } static int CheckErrors(const std::vector<distance_t> &distances, const std::vector<distance_t> &regression) { return SSSPCheckErrors(distances, regression); } }; using EdgeWeightDatumType = groute::graphs::multi::EdgeInputDatum<distance_t>; using NodeDistanceDatumType = groute::graphs::multi::NodeOutputGlobalDatum<distance_t>; template<bool IterationFusion = true, bool CTAScheduling = true> using FusedWorkerType = groute::FusedWorker< IterationFusion, local_work_t, remote_work_t, int, DWCallbacks, SSSPWork<CTAScheduling>, groute::graphs::dev::CSRGraphSeg, EdgeWeightDatumType::DeviceObjectType, NodeDistanceDatumType::DeviceObjectType, NodeDistanceDatumType::DeviceObjectType, NodeDistanceDatumType::DeviceObjectType>; template<bool CTAScheduling = true> using WorkerType = groute::Worker< local_work_t, remote_work_t, DWCallbacks, SSSPWork<CTAScheduling>, groute::graphs::dev::CSRGraphSeg, EdgeWeightDatumType::DeviceObjectType, NodeDistanceDatumType::DeviceObjectType, NodeDistanceDatumType::DeviceObjectType, NodeDistanceDatumType::DeviceObjectType>; template<typename TWorker> using RunnerType = utils::traversal::Runner< Algo, TWorker, DWCallbacks, local_work_t, remote_work_t, EdgeWeightDatumType, NodeDistanceDatumType, NodeDistanceDatumType, NodeDistanceDatumType>; } template<typename TWorker> bool TestSSSPAsyncMultiTemplate(int ngpus) { sssp::RunnerType<TWorker> runner; sssp::EdgeWeightDatumType edge_weights; sssp::NodeDistanceDatumType node_distances; sssp::NodeDistanceDatumType node_delta; sssp::NodeDistanceDatumType node_last_delta; printf("%u %u %u\n", node_distances, node_delta, node_last_delta); bool res = runner(ngpus, FLAGS_prio_delta, edge_weights, node_distances, node_delta, node_last_delta); return res; } //bool TestSSSPAsyncMultiOptimized(int ngpus) { // return FLAGS_cta_np // ? FLAGS_iteration_fusion // ? TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<true, true >>(ngpus) // : TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<false, true >>(ngpus) // : FLAGS_iteration_fusion // ? TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<true, false >>(ngpus) // : TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<false, false >>(ngpus); //} // //bool TestSSSPAsyncMulti(int ngpus) { // return FLAGS_cta_np // ? TestSSSPAsyncMultiTemplate<sssp::WorkerType<true >>(ngpus) // : TestSSSPAsyncMultiTemplate<sssp::WorkerType<false >>(ngpus); //} bool TestSSSPSingle() { return TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<true, true>>(1); }
b33541d6c5ac4f651a8f4962c7d9bb3690ee890d.cu
// Groute: An Asynchronous Multi-GPU Programming Framework // http://www.github.com/groute/groute // Copyright (c) 2017, A. Barak // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // * Redistributions of source code must retain the above copyright notice, // this list of conditions and the following disclaimer. // * Redistributions in binary form must reproduce the above copyright notice, // this list of conditions and the following disclaimer in the documentation // and/or other materials provided with the distribution. // * Neither the names of the copyright holders nor the names of its // contributors may be used to endorse or promote products derived from this // software without specific prior written permission. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" // AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE // IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE // ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE // LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR // CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF // SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS // INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN // CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) // ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE // POSSIBILITY OF SUCH DAMAGE. #include <vector> #include <algorithm> #include <thread> #include <memory> #include <random> #include <gflags/gflags.h> #include <groute/device/cta_scheduler.cuh> #include <groute/graphs/csr_graph.h> #include <groute/dwl/distributed_worklist.cuh> #include <groute/dwl/workers.cuh> #include <utils/graphs/traversal.h> #include <device_launch_parameters.h> #include "sssp_common.h" DECLARE_int32(source_node); const distance_t INF = UINT_MAX; namespace sssp { struct LocalData { index_t m_node; uint32_t m_iterated_round; __device__ __host__ __forceinline__ LocalData(uint32_t iterated_round, index_t node) : m_iterated_round(iterated_round), m_node(node) { } }; struct DistanceData { index_t node; distance_t distance; uint32_t iterated_round; uint32_t padding; __device__ __host__ __forceinline__ DistanceData(uint32_t iterated_round, index_t node, distance_t distance) : iterated_round(iterated_round), node(node), distance(distance) {} __device__ __host__ __forceinline__ DistanceData() : iterated_round(0), node(INF), distance(INF) { printf("call DistanceData default constructor\n"); } }; typedef LocalData local_work_t; typedef DistanceData remote_work_t; __global__ void SSSPMemsetKernel(index_t source_node, distance_t *distances, distance_t *delta, distance_t *last_delta, int nnodes) { int tid = TID_1D; if (tid < nnodes) { distances[tid] = INF; delta[tid] = INF; last_delta[tid] = INF; // if (tid == source_node) { // delta[tid] = 0; // last_delta[tid] = 0; // } } } template<bool CTAScheduling = true> /// SSSP work with Collective Thread Array scheduling for exploiting nested parallelism struct SSSPWork { template< typename WorkSource, typename WorkTarget, typename TGraph, typename TWeightDatum, typename TDistanceDatum> __device__ static void work( const uint32_t iterated_round, const WorkSource &work_source, WorkTarget &work_target, const TGraph &graph, TWeightDatum &edge_weights, 
TDistanceDatum &node_distances, TDistanceDatum &node_delta, TDistanceDatum &node_last_delta) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.get_size(); uint32_t work_size_rup = round_up(work_size, blockDim.x) * blockDim.x; // we want all threads in active blocks to enter the loop for (uint32_t i = 0 + tid; i < work_size_rup; i += nthreads) { groute::dev::np_local<distance_t> np_local = {0, 0, 0}; if (i < work_size) { index_t node = work_source.get_work(i).m_node; distance_t old_value = node_distances[node]; printf("abc: iter:%u %u %u %u\n", iterated_round, node_distances[node], node_delta[node], node_last_delta[node]); distance_t old_delta; if (iterated_round % 2) old_delta = atomicExch(node_delta.get_item_ptr(node), INF); else old_delta = atomicExch(node_last_delta.get_item_ptr(node), INF); distance_t new_value = min(old_value, old_delta); printf("iter:%d node %d old_delta %d\n", iterated_round, node, old_delta); printf("old value:%d new value:%d\n", old_value, new_value); if (new_value != old_value) { printf("node: %d old_delta:%d\n", node, old_delta); node_distances[node] = new_value; np_local.start = graph.begin_edge(node); np_local.size = graph.end_edge(node) - np_local.start; np_local.meta_data = old_delta; } } groute::dev::CTAWorkScheduler<distance_t>::template schedule( np_local, [&iterated_round, &work_target, &graph, &edge_weights, &node_last_delta, &node_delta]( index_t edge, index_t size, distance_t old_delta) { index_t dest = graph.edge_dest(edge); distance_t weight = edge_weights.get_item(edge); distance_t new_delta = old_delta + weight; if (iterated_round % 2) { if (new_delta < atomicMin(node_last_delta.get_item_ptr(dest), new_delta)) work_target.append_work(DistanceData(iterated_round, dest, new_delta)); } else { if (new_delta < atomicMin(node_delta.get_item_ptr(dest), new_delta)) work_target.append_work(DistanceData(iterated_round, dest, new_delta)); } } ); } } }; template<> /// SSSP work without CTA support struct SSSPWork<false> { template< typename WorkSource, typename WorkTarget, typename TGraph, typename TWeightDatum, typename TDistanceDatum> __device__ static void work( const uint32_t iterated_round, const WorkSource &work_source, WorkTarget &work_target, const TGraph &graph, TWeightDatum &edge_weights, TDistanceDatum &node_distances, TDistanceDatum &node_delta, TDistanceDatum &node_last_delta ) { uint32_t tid = TID_1D; uint32_t nthreads = TOTAL_THREADS_1D; uint32_t work_size = work_source.get_size(); for (uint32_t i = 0 + tid; i < work_size; i += nthreads) { index_t node = work_source.get_work(i).m_node; distance_t old_value = node_distances.get_item(node); distance_t old_delta; if (iterated_round % 2) old_delta = atomicExch(node_delta.get_item_ptr(node), INF); else old_delta = atomicExch(node_last_delta.get_item_ptr(node), INF); distance_t new_value = min(old_value, old_delta); if (new_value != old_value) { for (index_t edge = graph.begin_edge(node), end_edge = graph.end_edge(node); edge < end_edge; ++edge) { index_t dest = graph.edge_dest(edge); distance_t weight = edge_weights.get_item(edge); distance_t new_delta = old_delta + weight; if (iterated_round % 2) { if (new_delta < atomicMin(node_last_delta.get_item_ptr(node), new_delta)) { work_target.append_work(DistanceData(iterated_round, dest, new_delta)); } } else { if (new_delta < atomicMin(node_delta.get_item_ptr(node), new_delta)) { work_target.append_work(DistanceData(iterated_round, dest, new_delta)); } } } } } } }; //DWCallbacks instance per GPU struct 
DWCallbacks { private: groute::graphs::dev::CSRGraphSeg m_graph_seg; groute::graphs::dev::GraphDatum<distance_t> m_distances_datum; groute::graphs::dev::GraphDatum<distance_t> m_delta_datum; groute::graphs::dev::GraphDatum<distance_t> m_last_delta_datum; public: template<typename...UnusedData> DWCallbacks( const groute::graphs::dev::CSRGraphSeg &graph_seg, const groute::graphs::dev::GraphDatumSeg<distance_t> &weights_datum, const groute::graphs::dev::GraphDatum<distance_t> &distances_datum, const groute::graphs::dev::GraphDatum<distance_t> &delta_datum, const groute::graphs::dev::GraphDatum<distance_t> &last_datum, UnusedData &... data) : m_graph_seg(graph_seg), m_distances_datum(distances_datum), m_delta_datum(delta_datum), m_last_delta_datum(last_datum) { } DWCallbacks() {} __device__ __forceinline__ groute::SplitFlags on_receive(const remote_work_t &work) { printf("call on_receive %d,%d,%d\n", work.iterated_round, work.node, work.distance); if (m_graph_seg.owns(work.node)) { // return groute::SF_Take; if (work.iterated_round % 2) { return (work.distance < atomicMin(m_delta_datum.get_item_ptr(work.node), work.distance)) ? groute::SF_Take : groute::SF_None; // Filter } else { return (work.distance < atomicMin(m_last_delta_datum.get_item_ptr(work.node), work.distance)) ? groute::SF_Take : groute::SF_None; // Filter } } return groute::SF_Pass; } __device__ __forceinline__ bool should_defer(const local_work_t &work, const distance_t &global_threshold) { printf("iter:%u delta:%u\n", work.m_iterated_round, work.m_node); bool defer; if (work.m_iterated_round % 2) defer = m_delta_datum[work.m_node] > global_threshold; else defer = m_last_delta_datum[work.m_node] > global_threshold; if (defer) printf("defer\n"); return defer; } __device__ __forceinline__ groute::SplitFlags on_send(local_work_t work) { printf("call on_send\n"); return (m_graph_seg.owns(work.m_node)) ? 
groute::SF_Take : groute::SF_Pass; } __device__ __forceinline__ remote_work_t pack(local_work_t work) { printf("iterated round:%d\n", work.m_iterated_round); return DistanceData(work.m_iterated_round, work.m_node, m_distances_datum.get_item(work.m_node)); } __device__ __forceinline__ local_work_t unpack(const remote_work_t &work) { return LocalData(work.iterated_round, work.node); } }; struct Algo { static const char *NameLower() { return "sssp"; } static const char *Name() { return "SSSP"; } static void HostInit( utils::traversal::Context<sssp::Algo> &context, groute::graphs::multi::CSRGraphAllocator &graph_manager, groute::IDistributedWorklist<local_work_t, remote_work_t> &distributed_worklist) { // Get a valid source_node from flag index_t source_node = min(max((index_t) 0, (index_t) FLAGS_source_node), context.host_graph.nnodes - 1); // Map to the (possibly new) partitioned vertex space source_node = graph_manager.GetGraphPartitioner()->ReverseLookup(source_node); assert(source_node == 0); // Host endpoint for sending initial work groute::Endpoint host = groute::Endpoint::HostEndpoint(0); // Report the initial work distributed_worklist.ReportInitialWork(1, host); std::vector<remote_work_t> initial_work; initial_work.push_back(remote_work_t(1, source_node, 0)); distributed_worklist .GetLink(host) .Send(groute::Segment<remote_work_t>(&initial_work[0], 1), groute::Event()); printf("Host Init finished\n"); } template<typename TGraph, typename TWeightDatum, typename TDistanceDatum, typename...UnusedData> static void DeviceMemset(groute::Stream &stream, TGraph &graph, TWeightDatum &weights_datum, TDistanceDatum &distances_datum, TDistanceDatum &delta_datum, TDistanceDatum &last_datum, const UnusedData &... data) { dim3 grid_dims, block_dims; KernelSizing(grid_dims, block_dims, distances_datum.size); index_t s_node = 0; SSSPMemsetKernel << < grid_dims, block_dims, 0, stream.cuda_stream >> > ( s_node, distances_datum.data_ptr, delta_datum.data_ptr, last_datum.data_ptr, distances_datum.size); } template<typename TGraph, typename TWeightDatum, typename TDistanceDatum, typename...UnusedData> static void DeviceInit( groute::Endpoint endpoint, groute::Stream &stream, groute::IDistributedWorklist<local_work_t, remote_work_t> &distributed_worklist, groute::IDistributedWorklistPeer<local_work_t, remote_work_t, DWCallbacks> *peer, TGraph &graph, TWeightDatum &weights_datum, TDistanceDatum &distances_datum, TDistanceDatum &delta_datum, TDistanceDatum &last_datum, const UnusedData &... data) { } template< typename TGraphAllocator, typename TWeightDatum, typename TDistanceDatum, typename...UnusedData> static const std::vector<distance_t> & Gather(TGraphAllocator &graph_allocator, TWeightDatum &weights_datum, TDistanceDatum &distances_datum, TDistanceDatum &delta_datum, TDistanceDatum &last_delta_datum, UnusedData &... data) { //graph_allocator.GatherDatum(distances_datum); //graph_allocator.GatherDatum(delta_datum); return distances_datum.GetHostData(); } template< typename TWeightDatum, typename TDistanceDatum, typename...UnusedData> static std::vector<distance_t> Host(groute::graphs::host::CSRGraph &graph, TWeightDatum &weights_datum, TDistanceDatum &distances_datum, TDistanceDatum &delta_datum, TDistanceDatum &last_delta_datum, UnusedData &... 
data) { return SSSPHostNaive(graph, weights_datum.GetHostDataPtr(), min(max((index_t) 0, (index_t) FLAGS_source_node), graph.nnodes - 1)); } static int Output(const char *file, const std::vector<distance_t> &distances) { return SSSPOutput(file, distances); } static int CheckErrors(const std::vector<distance_t> &distances, const std::vector<distance_t> &regression) { return SSSPCheckErrors(distances, regression); } }; using EdgeWeightDatumType = groute::graphs::multi::EdgeInputDatum<distance_t>; using NodeDistanceDatumType = groute::graphs::multi::NodeOutputGlobalDatum<distance_t>; template<bool IterationFusion = true, bool CTAScheduling = true> using FusedWorkerType = groute::FusedWorker< IterationFusion, local_work_t, remote_work_t, int, DWCallbacks, SSSPWork<CTAScheduling>, groute::graphs::dev::CSRGraphSeg, EdgeWeightDatumType::DeviceObjectType, NodeDistanceDatumType::DeviceObjectType, NodeDistanceDatumType::DeviceObjectType, NodeDistanceDatumType::DeviceObjectType>; template<bool CTAScheduling = true> using WorkerType = groute::Worker< local_work_t, remote_work_t, DWCallbacks, SSSPWork<CTAScheduling>, groute::graphs::dev::CSRGraphSeg, EdgeWeightDatumType::DeviceObjectType, NodeDistanceDatumType::DeviceObjectType, NodeDistanceDatumType::DeviceObjectType, NodeDistanceDatumType::DeviceObjectType>; template<typename TWorker> using RunnerType = utils::traversal::Runner< Algo, TWorker, DWCallbacks, local_work_t, remote_work_t, EdgeWeightDatumType, NodeDistanceDatumType, NodeDistanceDatumType, NodeDistanceDatumType>; } template<typename TWorker> bool TestSSSPAsyncMultiTemplate(int ngpus) { sssp::RunnerType<TWorker> runner; sssp::EdgeWeightDatumType edge_weights; sssp::NodeDistanceDatumType node_distances; sssp::NodeDistanceDatumType node_delta; sssp::NodeDistanceDatumType node_last_delta; printf("%u %u %u\n", node_distances, node_delta, node_last_delta); bool res = runner(ngpus, FLAGS_prio_delta, edge_weights, node_distances, node_delta, node_last_delta); return res; } //bool TestSSSPAsyncMultiOptimized(int ngpus) { // return FLAGS_cta_np // ? FLAGS_iteration_fusion // ? TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<true, true >>(ngpus) // : TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<false, true >>(ngpus) // : FLAGS_iteration_fusion // ? TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<true, false >>(ngpus) // : TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<false, false >>(ngpus); //} // //bool TestSSSPAsyncMulti(int ngpus) { // return FLAGS_cta_np // ? TestSSSPAsyncMultiTemplate<sssp::WorkerType<true >>(ngpus) // : TestSSSPAsyncMultiTemplate<sssp::WorkerType<false >>(ngpus); //} bool TestSSSPSingle() { return TestSSSPAsyncMultiTemplate<sssp::FusedWorkerType<true, true>>(1); }
ec6175aa2a88e7feb271c099ebd8bbab4e69ee86.hip
// !!! This is a file automatically generated by hipify!!! #include "SerializeDeserialize.cuh" void serializeNeuralNet(NeuralNet* nn, char* fileName){ // Opens the file for writing FILE* file=fopen(fileName, "w"); // Writes the layer data fprintf(file, "%d\n", nn->layers); // Writes the neuron data for(int layer=0; layer<nn->layers; layer++){ fprintf(file, "%d\n", nn->neurons[layer]); } // Writes the weight data for(int layer=0; layer<nn->layers-1; layer++){ for(int neuron1=0; neuron1<nn->neurons[layer]; neuron1++){ for(int neuron2=0; neuron2<nn->neurons[layer+1]; neuron2++){ fprintf(file, "%lf\n", nn->weights[layer][neuron1][neuron2]); } } } // Writes the bias data for(int layer=0; layer<nn->layers-1; layer++){ for(int neuron=0; neuron<nn->neurons[layer+1]; neuron++){ fprintf(file, "%lf\n", nn->biases[layer][neuron]); } } // Writes the activation data for(int layer=0; layer<nn->layers-1; layer++){ for(int neuron=0; neuron<nn->neurons[layer+1]; neuron++){ fprintf(file, "%d\n", nn->activations[layer][neuron]); } } fclose(file); } NeuralNet* deserializeNeuralNet(char* fileName){ FILE* file=fopen(fileName, "r"); NeuralNet* nn; hipMallocManaged(&nn, 1*sizeof(NeuralNet)); // Gets the layers fscanf(file, "%d\n", &nn->layers); // Gets the neuron data hipMallocManaged(&nn->neurons, nn->layers*sizeof(int)); for(int layer=0; layer<nn->layers; layer++){ fscanf(file, "%d\n", &nn->neurons[layer]); } // Gets the weight data hipMallocManaged(&nn->weights, (nn->layers-1)*sizeof(double**)); for(int layer=0; layer<nn->layers-1; layer++){ hipMallocManaged(&nn->weights[layer], nn->neurons[layer]*sizeof(double*)); for(int neuron1=0; neuron1<nn->neurons[layer]; neuron1++){ hipMallocManaged(&nn->weights[layer][neuron1], nn->neurons[layer+1]*sizeof(double)); for(int neuron2=0; neuron2<nn->neurons[layer+1]; neuron2++){ fscanf(file, "%lf\n", &nn->weights[layer][neuron1][neuron2]); //printf("Layer=%d\tNeuron1=%d\tNeuron2=%d\tWeight=%lf\n", layer, neuron1, neuron2, nn->weights[layer][neuron1][neuron2]); } } } // Gets the bias data hipMallocManaged(&nn->biases, (nn->layers-1)*sizeof(double*)); for(int layer=0; layer<nn->layers-1; layer++){ hipMallocManaged(&nn->biases[layer], nn->neurons[layer+1]*sizeof(double)); for(int neuron=0; neuron<nn->neurons[layer+1]; neuron++){ fscanf(file, "%lf\n", &nn->biases[layer][neuron]); } } // Gets the activation function data hipMallocManaged(&nn->activations, (nn->layers-1)*sizeof(activation*)); for(int layer=0; layer<nn->layers-1; layer++){ hipMallocManaged(&nn->activations[layer], nn->neurons[layer+1]*sizeof(activation)); for(int neuron=0; neuron<nn->neurons[layer+1]; neuron++){ fscanf(file, "%d\n", &nn->activations[layer][neuron]); } } fclose(file); return nn; } void serializeChessBoard(Piece** board, char* filename){ FILE* file=fopen(filename, "w"); fprintf(file, "\t"); for(int col=0; col<DIM; col++){ fprintf(file, "%c\t", ((int)'A')+col); } fprintf(file, "\n"); for(int row=0; row<DIM; row++){ fprintf(file, "%d\t", row); for(int col=0; col<DIM; col++){ if(board[row][col].numberConversion==0){ fprintf(file, "______\t"); } else{ fprintf(file, "__"); if(board[row][col].piece.color==0){ fprintf(file, "W"); } else{ fprintf(file, "B"); } if(board[row][col].piece.isPawn){ fprintf(file, "P"); } else if(board[row][col].piece.isRook){ fprintf(file, "R"); } else if(board[row][col].piece.isKnight){ fprintf(file, "N"); } else if(board[row][col].piece.isBishop){ fprintf(file, "B"); } else if(board[row][col].piece.isQueen){ fprintf(file, "Q"); } else{ fprintf(file, "K"); } fprintf(file, 
"__\t"); } } fprintf(file, "\n"); } fprintf(file, "\n======================================================\n\n"); for(int row=0; row<DIM; row++){ for(int col=0; col<DIM; col++){ fprintf(file, "Row %d, Col %d, Num %d, clr %d, fst %d, Pwn %d, Rk %d, Knt %d, Bshp %d, Qn %d, Kng %d\n", row, col, board[row][col].numberConversion, board[row][col].piece.color, board[row][col].piece.isFirstMove, board[row][col].piece.isPawn, board[row][col].piece.isRook, board[row][col].piece.isKnight, board[row][col].piece.isBishop, board[row][col].piece.isQueen, board[row][col].piece.isKing); } } fclose(file); }
ec6175aa2a88e7feb271c099ebd8bbab4e69ee86.cu
#include "SerializeDeserialize.cuh" void serializeNeuralNet(NeuralNet* nn, char* fileName){ // Opens the file for writing FILE* file=fopen(fileName, "w"); // Writes the layer data fprintf(file, "%d\n", nn->layers); // Writes the neuron data for(int layer=0; layer<nn->layers; layer++){ fprintf(file, "%d\n", nn->neurons[layer]); } // Writes the weight data for(int layer=0; layer<nn->layers-1; layer++){ for(int neuron1=0; neuron1<nn->neurons[layer]; neuron1++){ for(int neuron2=0; neuron2<nn->neurons[layer+1]; neuron2++){ fprintf(file, "%lf\n", nn->weights[layer][neuron1][neuron2]); } } } // Writes the bias data for(int layer=0; layer<nn->layers-1; layer++){ for(int neuron=0; neuron<nn->neurons[layer+1]; neuron++){ fprintf(file, "%lf\n", nn->biases[layer][neuron]); } } // Writes the activation data for(int layer=0; layer<nn->layers-1; layer++){ for(int neuron=0; neuron<nn->neurons[layer+1]; neuron++){ fprintf(file, "%d\n", nn->activations[layer][neuron]); } } fclose(file); } NeuralNet* deserializeNeuralNet(char* fileName){ FILE* file=fopen(fileName, "r"); NeuralNet* nn; cudaMallocManaged(&nn, 1*sizeof(NeuralNet)); // Gets the layers fscanf(file, "%d\n", &nn->layers); // Gets the neuron data cudaMallocManaged(&nn->neurons, nn->layers*sizeof(int)); for(int layer=0; layer<nn->layers; layer++){ fscanf(file, "%d\n", &nn->neurons[layer]); } // Gets the weight data cudaMallocManaged(&nn->weights, (nn->layers-1)*sizeof(double**)); for(int layer=0; layer<nn->layers-1; layer++){ cudaMallocManaged(&nn->weights[layer], nn->neurons[layer]*sizeof(double*)); for(int neuron1=0; neuron1<nn->neurons[layer]; neuron1++){ cudaMallocManaged(&nn->weights[layer][neuron1], nn->neurons[layer+1]*sizeof(double)); for(int neuron2=0; neuron2<nn->neurons[layer+1]; neuron2++){ fscanf(file, "%lf\n", &nn->weights[layer][neuron1][neuron2]); //printf("Layer=%d\tNeuron1=%d\tNeuron2=%d\tWeight=%lf\n", layer, neuron1, neuron2, nn->weights[layer][neuron1][neuron2]); } } } // Gets the bias data cudaMallocManaged(&nn->biases, (nn->layers-1)*sizeof(double*)); for(int layer=0; layer<nn->layers-1; layer++){ cudaMallocManaged(&nn->biases[layer], nn->neurons[layer+1]*sizeof(double)); for(int neuron=0; neuron<nn->neurons[layer+1]; neuron++){ fscanf(file, "%lf\n", &nn->biases[layer][neuron]); } } // Gets the activation function data cudaMallocManaged(&nn->activations, (nn->layers-1)*sizeof(activation*)); for(int layer=0; layer<nn->layers-1; layer++){ cudaMallocManaged(&nn->activations[layer], nn->neurons[layer+1]*sizeof(activation)); for(int neuron=0; neuron<nn->neurons[layer+1]; neuron++){ fscanf(file, "%d\n", &nn->activations[layer][neuron]); } } fclose(file); return nn; } void serializeChessBoard(Piece** board, char* filename){ FILE* file=fopen(filename, "w"); fprintf(file, "\t"); for(int col=0; col<DIM; col++){ fprintf(file, "%c\t", ((int)'A')+col); } fprintf(file, "\n"); for(int row=0; row<DIM; row++){ fprintf(file, "%d\t", row); for(int col=0; col<DIM; col++){ if(board[row][col].numberConversion==0){ fprintf(file, "______\t"); } else{ fprintf(file, "__"); if(board[row][col].piece.color==0){ fprintf(file, "W"); } else{ fprintf(file, "B"); } if(board[row][col].piece.isPawn){ fprintf(file, "P"); } else if(board[row][col].piece.isRook){ fprintf(file, "R"); } else if(board[row][col].piece.isKnight){ fprintf(file, "N"); } else if(board[row][col].piece.isBishop){ fprintf(file, "B"); } else if(board[row][col].piece.isQueen){ fprintf(file, "Q"); } else{ fprintf(file, "K"); } fprintf(file, "__\t"); } } fprintf(file, "\n"); } fprintf(file, 
"\n======================================================\n\n"); for(int row=0; row<DIM; row++){ for(int col=0; col<DIM; col++){ fprintf(file, "Row %d, Col %d, Num %d, clr %d, fst %d, Pwn %d, Rk %d, Knt %d, Bshp %d, Qn %d, Kng %d\n", row, col, board[row][col].numberConversion, board[row][col].piece.color, board[row][col].piece.isFirstMove, board[row][col].piece.isPawn, board[row][col].piece.isRook, board[row][col].piece.isKnight, board[row][col].piece.isBishop, board[row][col].piece.isQueen, board[row][col].piece.isKing); } } fclose(file); }
91bae0caf2df5bbaa4d915451a308104f1ced8aa.hip
// !!! This is a file automatically generated by hipify!!! #include "shared.h" #include <cstdio> #include <cmath> #include <thrust/complex.h> #include <hip/hip_runtime.h> //#include "rocblas.h" extern "C"{ /*Reaproveite este kernel pois ele pode ser usado para resolver a parte * de deslocamentos de pontos internos de Interec.for*/ __global__ void ghmatecd_kernel( int cone[], FREAL cx[], FREAL cy[], FREAL cz[], FREAL cxm[], FREAL cym[], FREAL czm[], thrust::complex<FREAL> zh[], thrust::complex<FREAL> zg[], FREAL rn[][3], thrust::complex<FREAL> zge, thrust::complex<FREAL> zcs, thrust::complex<FREAL> zcp, FREAL fr, FREAL gi[], FREAL ome[], FREAL c1, FREAL c2, FREAL c3, FREAL c4, //int npg, int n, int nbe, int dim_cone, int fast_singular, int* ret ); void cuda_interec1_(int* n, int* nbe, int* npg, int* l, int* np, FREAL cxi[], FREAL cyi[], FREAL czi[], thrust::complex<FREAL>* zge, thrust::complex<FREAL>* zcs, thrust::complex<FREAL>* zcp, FREAL* c1, FREAL* c2, FREAL* c3, FREAL* c4, FREAL* fr, thrust::complex<FREAL> zdfi[], thrust::complex<FREAL> zfi[], thrust::complex<FREAL> zdsol[], int* status ) { dim3 threadsPerBlock(*npg,*npg); dim3 numBlocks(*nbe, *l); int shared_mem_size = 2*3*3*(*npg)*(*npg)*sizeof(thrust::complex<FREAL>); hipError_t error; thrust::complex<FREAL> zhelem[3][3]; thrust::complex<FREAL> zgelem[3][3]; thrust::complex<FREAL>* device_zh; thrust::complex<FREAL>* device_zg; thrust::complex<FREAL>* device_zdfi; thrust::complex<FREAL>* device_zfi; thrust::complex<FREAL>* device_zdsol; FREAL* device_cxi; FREAL* device_cyi; FREAL* device_czi; int* device_return_status; int return_status; thrust::complex<FREAL> one(1., 0.); thrust::complex<FREAL> zero(0., 0.); thrust::complex<FREAL> minus_one(-1., 0.); hipblasHandle_t handle; hipblasStatus_t stats; error = hipMalloc(&device_return_status, sizeof(int)); cuda_assert(error); error = hipMalloc(&device_zh, (3*(*nbe))*(3*(*l))*sizeof(thrust::complex<FREAL>)); cuda_assert(error); error = hipMalloc(&device_zg, (3*(*nbe))*(3*(*l))*sizeof(thrust::complex<FREAL>)); cuda_assert(error); error = hipMemset(device_return_status, 0, sizeof(int)); cuda_assert(error); error = hipMemset(device_zh, 0, (3*(*nbe))*(3*(*l))*sizeof(thrust::complex<FREAL>)); cuda_assert(error); error = hipMemset(device_zg, 0, (3*(*nbe))*(3*(*l))*sizeof(thrust::complex<FREAL>)); cuda_assert(error); error = hipMalloc(&device_cxi, (*l)*sizeof(FREAL)); cuda_assert(error); error = hipMalloc(&device_cyi, (*l)*sizeof(FREAL)); cuda_assert(error); error = hipMalloc(&device_czi, (*l)*sizeof(FREAL)); cuda_assert(error); error = hipMemcpy(device_cxi, cxi, (*l)*sizeof(FREAL), hipMemcpyHostToDevice); cuda_assert(error); error = hipMemcpy(device_cyi, cyi, (*l)*sizeof(FREAL), hipMemcpyHostToDevice); cuda_assert(error); error = hipMemcpy(device_czi, czi, (*l)*sizeof(FREAL), hipMemcpyHostToDevice); cuda_assert(error); hipLaunchKernelGGL(( ghmatecd_kernel), dim3(numBlocks), dim3(threadsPerBlock), shared_mem_size, 0, device_cone, device_cx, device_cy, device_cz, device_cxi, device_cyi, device_czi, device_zh, device_zg, (FREAL (*)[3]) device_etas, *zge, *zcs, *zcp, *fr, device_gi, device_ome, *c1, *c2, *c3, *c4, // *npg, *nbe, *l, *n, 0, device_return_status ); hipDeviceSynchronize(); error = hipMalloc(&device_zdsol, 3*(*l)*sizeof(thrust::complex<FREAL>)); cuda_assert(error); error = hipMalloc(&device_zdfi, 3*(*nbe)*sizeof(thrust::complex<FREAL>)); cuda_assert(error); error = hipMalloc(&device_zfi, 3*(*nbe)*sizeof(thrust::complex<FREAL>)); cuda_assert(error); error = hipMemcpy(device_zdfi, zdfi, 
3*(*nbe)*sizeof(thrust::complex<FREAL>), hipMemcpyHostToDevice); cuda_assert(error); error = hipMemcpy(device_zfi, zfi, 3*(*nbe)*sizeof(thrust::complex<FREAL>), hipMemcpyHostToDevice); cuda_assert(error); hipDeviceSynchronize(); error = hipMemcpy(&return_status, device_return_status, sizeof(int), hipMemcpyDeviceToHost); cuda_assert(error); if (return_status != 0) { fputs("Matriz Singular\n", stderr); } stats = hipblasCreate(&handle); cublas_assert(stats); if (sizeof(FREAL) == 8) { stats = hipblasZgemv(handle, HIPBLAS_OP_N, 3*(*l), 3*(*nbe), (hipDoubleComplex*) &one, (hipDoubleComplex*) device_zg, 3*(*l), (hipDoubleComplex*) device_zdfi, 1, (hipDoubleComplex*) &zero, (hipDoubleComplex*) device_zdsol, 1); cublas_assert(stats); hipDeviceSynchronize(); stats = hipblasZgemv(handle, HIPBLAS_OP_N, 3*(*l), 3*(*nbe), (hipDoubleComplex*) &(minus_one), (hipDoubleComplex*) device_zh, 3*(*l), (hipDoubleComplex*) device_zfi, 1, (hipDoubleComplex*) &one, (hipDoubleComplex*) device_zdsol, 1); cublas_assert(stats); hipDeviceSynchronize(); } else { stats = hipblasCgemv(handle, HIPBLAS_OP_N, 3*(*l), 3*(*nbe), (hipComplex*) &one, (hipComplex*) device_zg, 3*(*l), (hipComplex*) device_zdfi, 1, (hipComplex*) &zero, (hipComplex*) device_zdsol, 1); cublas_assert(stats); hipDeviceSynchronize(); stats = hipblasCgemv(handle, HIPBLAS_OP_N, 3*(*l), 3*(*nbe), (hipComplex*) &(minus_one), (hipComplex*) device_zh, 3*(*l), (hipComplex*) device_zfi, 1, (hipComplex*) &one, (hipComplex*) device_zdsol, 1); cublas_assert(stats); hipDeviceSynchronize(); } error = hipMemcpy(zdsol, device_zdsol, 3*(*l)*sizeof(thrust::complex<FREAL>), hipMemcpyDeviceToHost); cuda_assert(error); error = hipFree(device_zh); cuda_assert(error); error = hipFree(device_zg); *status = return_status; error = hipFree(device_return_status); cuda_assert(error); error = hipFree(device_cxi); cuda_assert(error); error = hipFree(device_cyi); cuda_assert(error); error = hipFree(device_czi); cuda_assert(error); error = hipFree(device_zfi); cuda_assert(error); error = hipFree(device_zdfi); cuda_assert(error); error = hipFree(device_zdsol); cuda_assert(error); hipblasDestroy(handle); } }
91bae0caf2df5bbaa4d915451a308104f1ced8aa.cu
#include "shared.h" #include <cstdio> #include <cmath> #include <thrust/complex.h> #include <cuda_runtime.h> //#include "cublas_v2.h" extern "C"{ /*Reaproveite este kernel pois ele pode ser usado para resolver a parte * de deslocamentos de pontos internos de Interec.for*/ __global__ void ghmatecd_kernel( int cone[], FREAL cx[], FREAL cy[], FREAL cz[], FREAL cxm[], FREAL cym[], FREAL czm[], thrust::complex<FREAL> zh[], thrust::complex<FREAL> zg[], FREAL rn[][3], thrust::complex<FREAL> zge, thrust::complex<FREAL> zcs, thrust::complex<FREAL> zcp, FREAL fr, FREAL gi[], FREAL ome[], FREAL c1, FREAL c2, FREAL c3, FREAL c4, //int npg, int n, int nbe, int dim_cone, int fast_singular, int* ret ); void cuda_interec1_(int* n, int* nbe, int* npg, int* l, int* np, FREAL cxi[], FREAL cyi[], FREAL czi[], thrust::complex<FREAL>* zge, thrust::complex<FREAL>* zcs, thrust::complex<FREAL>* zcp, FREAL* c1, FREAL* c2, FREAL* c3, FREAL* c4, FREAL* fr, thrust::complex<FREAL> zdfi[], thrust::complex<FREAL> zfi[], thrust::complex<FREAL> zdsol[], int* status ) { dim3 threadsPerBlock(*npg,*npg); dim3 numBlocks(*nbe, *l); int shared_mem_size = 2*3*3*(*npg)*(*npg)*sizeof(thrust::complex<FREAL>); cudaError_t error; thrust::complex<FREAL> zhelem[3][3]; thrust::complex<FREAL> zgelem[3][3]; thrust::complex<FREAL>* device_zh; thrust::complex<FREAL>* device_zg; thrust::complex<FREAL>* device_zdfi; thrust::complex<FREAL>* device_zfi; thrust::complex<FREAL>* device_zdsol; FREAL* device_cxi; FREAL* device_cyi; FREAL* device_czi; int* device_return_status; int return_status; thrust::complex<FREAL> one(1., 0.); thrust::complex<FREAL> zero(0., 0.); thrust::complex<FREAL> minus_one(-1., 0.); cublasHandle_t handle; cublasStatus_t stats; error = cudaMalloc(&device_return_status, sizeof(int)); cuda_assert(error); error = cudaMalloc(&device_zh, (3*(*nbe))*(3*(*l))*sizeof(thrust::complex<FREAL>)); cuda_assert(error); error = cudaMalloc(&device_zg, (3*(*nbe))*(3*(*l))*sizeof(thrust::complex<FREAL>)); cuda_assert(error); error = cudaMemset(device_return_status, 0, sizeof(int)); cuda_assert(error); error = cudaMemset(device_zh, 0, (3*(*nbe))*(3*(*l))*sizeof(thrust::complex<FREAL>)); cuda_assert(error); error = cudaMemset(device_zg, 0, (3*(*nbe))*(3*(*l))*sizeof(thrust::complex<FREAL>)); cuda_assert(error); error = cudaMalloc(&device_cxi, (*l)*sizeof(FREAL)); cuda_assert(error); error = cudaMalloc(&device_cyi, (*l)*sizeof(FREAL)); cuda_assert(error); error = cudaMalloc(&device_czi, (*l)*sizeof(FREAL)); cuda_assert(error); error = cudaMemcpy(device_cxi, cxi, (*l)*sizeof(FREAL), cudaMemcpyHostToDevice); cuda_assert(error); error = cudaMemcpy(device_cyi, cyi, (*l)*sizeof(FREAL), cudaMemcpyHostToDevice); cuda_assert(error); error = cudaMemcpy(device_czi, czi, (*l)*sizeof(FREAL), cudaMemcpyHostToDevice); cuda_assert(error); ghmatecd_kernel<<<numBlocks, threadsPerBlock, shared_mem_size>>>( device_cone, device_cx, device_cy, device_cz, device_cxi, device_cyi, device_czi, device_zh, device_zg, (FREAL (*)[3]) device_etas, *zge, *zcs, *zcp, *fr, device_gi, device_ome, *c1, *c2, *c3, *c4, // *npg, *nbe, *l, *n, 0, device_return_status ); cudaDeviceSynchronize(); error = cudaMalloc(&device_zdsol, 3*(*l)*sizeof(thrust::complex<FREAL>)); cuda_assert(error); error = cudaMalloc(&device_zdfi, 3*(*nbe)*sizeof(thrust::complex<FREAL>)); cuda_assert(error); error = cudaMalloc(&device_zfi, 3*(*nbe)*sizeof(thrust::complex<FREAL>)); cuda_assert(error); error = cudaMemcpy(device_zdfi, zdfi, 3*(*nbe)*sizeof(thrust::complex<FREAL>), cudaMemcpyHostToDevice); 
cuda_assert(error); error = cudaMemcpy(device_zfi, zfi, 3*(*nbe)*sizeof(thrust::complex<FREAL>), cudaMemcpyHostToDevice); cuda_assert(error); cudaDeviceSynchronize(); error = cudaMemcpy(&return_status, device_return_status, sizeof(int), cudaMemcpyDeviceToHost); cuda_assert(error); if (return_status != 0) { fputs("Matriz Singular\n", stderr); } stats = cublasCreate(&handle); cublas_assert(stats); if (sizeof(FREAL) == 8) { stats = cublasZgemv(handle, CUBLAS_OP_N, 3*(*l), 3*(*nbe), (cuDoubleComplex*) &one, (cuDoubleComplex*) device_zg, 3*(*l), (cuDoubleComplex*) device_zdfi, 1, (cuDoubleComplex*) &zero, (cuDoubleComplex*) device_zdsol, 1); cublas_assert(stats); cudaDeviceSynchronize(); stats = cublasZgemv(handle, CUBLAS_OP_N, 3*(*l), 3*(*nbe), (cuDoubleComplex*) &(minus_one), (cuDoubleComplex*) device_zh, 3*(*l), (cuDoubleComplex*) device_zfi, 1, (cuDoubleComplex*) &one, (cuDoubleComplex*) device_zdsol, 1); cublas_assert(stats); cudaDeviceSynchronize(); } else { stats = cublasCgemv(handle, CUBLAS_OP_N, 3*(*l), 3*(*nbe), (cuComplex*) &one, (cuComplex*) device_zg, 3*(*l), (cuComplex*) device_zdfi, 1, (cuComplex*) &zero, (cuComplex*) device_zdsol, 1); cublas_assert(stats); cudaDeviceSynchronize(); stats = cublasCgemv(handle, CUBLAS_OP_N, 3*(*l), 3*(*nbe), (cuComplex*) &(minus_one), (cuComplex*) device_zh, 3*(*l), (cuComplex*) device_zfi, 1, (cuComplex*) &one, (cuComplex*) device_zdsol, 1); cublas_assert(stats); cudaDeviceSynchronize(); } error = cudaMemcpy(zdsol, device_zdsol, 3*(*l)*sizeof(thrust::complex<FREAL>), cudaMemcpyDeviceToHost); cuda_assert(error); error = cudaFree(device_zh); cuda_assert(error); error = cudaFree(device_zg); *status = return_status; error = cudaFree(device_return_status); cuda_assert(error); error = cudaFree(device_cxi); cuda_assert(error); error = cudaFree(device_cyi); cuda_assert(error); error = cudaFree(device_czi); cuda_assert(error); error = cudaFree(device_zfi); cuda_assert(error); error = cudaFree(device_zdfi); cuda_assert(error); error = cudaFree(device_zdsol); cuda_assert(error); cublasDestroy(handle); } }
d81441bfe1065614500ffa38ae9d6f7ffbbb6eee.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "display.cuh" #include "device_helper.cuh" #include "glm_macro.h" #include "cuda_event.h" #include <hip/hip_fp16.h> #include <glm/vec3.hpp> __global__ void oneHalfChannelToWindowContentKernel(hipSurfaceObject_t surface, hipSurfaceObject_t window, float scale) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; unsigned short h_pixel; surf2Dread(&h_pixel, surface, i * 2, j, hipBoundaryModeZero); auto pixel = static_cast<unsigned char>(__half2float(h_pixel) * scale); unsigned int pixel_w = (255) << 8; pixel_w = (pixel_w | pixel) << 8; pixel_w = (pixel_w | pixel) << 8; pixel_w = (pixel_w | pixel); surf2Dwrite(pixel_w, window, i * 4, j); } __global__ void oneFloatChannelToWindowContentKernel(hipSurfaceObject_t surface, hipSurfaceObject_t window, float scale) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; float f_pixel; surf2Dread(&f_pixel, surface, i * 4, j, hipBoundaryModeZero); auto pixel = static_cast<unsigned char>(f_pixel * scale); unsigned int pixel_w = (255) << 8; pixel_w = (pixel_w | pixel) << 8; pixel_w = (pixel_w | pixel) << 8; pixel_w = (pixel_w | pixel); surf2Dwrite(pixel_w, window, i * 4, j); } __global__ void fourFloatChannelToWindowContentKernel(hipSurfaceObject_t surface, hipSurfaceObject_t window, float scale) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; float r, g, b; int idx = i * 16; surf2Dread(&r, surface, idx, j, hipBoundaryModeZero); surf2Dread(&g, surface, idx + 4, j, hipBoundaryModeZero); surf2Dread(&b, surface, idx + 8, j, hipBoundaryModeZero); unsigned int pixel_w = (255) << 8; pixel_w = (pixel_w | static_cast<unsigned char>(b * scale)) << 8; pixel_w = (pixel_w | static_cast<unsigned char>(g * scale)) << 8; pixel_w = (pixel_w | static_cast<unsigned char>(r * scale)); surf2Dwrite(pixel_w, window, i * 4, j); } __global__ void normalMapToWindowContentKernel(hipSurfaceObject_t normal_map, hipSurfaceObject_t window, glm::mat3 inverse_sensor_rotation) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; glm::vec3 normal; int idx = i * 16; surf2Dread(&normal.x, normal_map, idx, j, hipBoundaryModeZero); surf2Dread(&normal.y, normal_map, idx + 4, j, hipBoundaryModeZero); surf2Dread(&normal.z, normal_map, idx + 8, j, hipBoundaryModeZero); normal = glm::normalize(inverse_sensor_rotation * normal); unsigned int pixel_w = (255) << 8; pixel_w = (pixel_w | static_cast<unsigned char>(normal.z * 255.0f)) << 8; pixel_w = (pixel_w | static_cast<unsigned char>(normal.y * 255.0f)) << 8; pixel_w = (pixel_w | static_cast<unsigned char>(normal.x * 255.0f)); surf2Dwrite(pixel_w, window, i * 4, j); } __global__ void shadingToWindowContentKernel(hipSurfaceObject_t normal_map, hipSurfaceObject_t window, Sensor sensor) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; glm::vec3 normal; int idx = i * 16; surf2Dread(&normal.x, normal_map, idx, j, hipBoundaryModeZero); surf2Dread(&normal.y, normal_map, idx + 4, j, hipBoundaryModeZero); surf2Dread(&normal.z, normal_map, idx + 8, j, hipBoundaryModeZero); auto ray_direction = glm::normalize(glm::mat3(sensor.getPose()) * sensor.getInverseIntr(0) * glm::vec3(i + 0.5f, j + 0.5f, 1.0f)); auto radiance = glm::min(glm::abs(glm::dot(normal, ray_direction)), 1.0f); unsigned int pixel_w = (255) << 8; pixel_w = (pixel_w | 
static_cast<unsigned char>(radiance * 255.0f)) << 8; pixel_w = (pixel_w | static_cast<unsigned char>(radiance * 255.0f)) << 8; pixel_w = (pixel_w | static_cast<unsigned char>(radiance * 255.0f)); surf2Dwrite(pixel_w, window, i * 4, j); } namespace kernel { float oneHalfChannelToWindowContent(hipSurfaceObject_t surface, const Window& window, float scale) { CudaEvent start, end; dim3 threads(8, 8); dim3 blocks(640 / threads.x, 480 / threads.y); start.record(); oneHalfChannelToWindowContentKernel << <blocks, threads >> > (surface, window.get_content(), scale); end.record(); end.synchronize(); return CudaEvent::calculateElapsedTime(start, end); } float oneFloatChannelToWindowContent(hipSurfaceObject_t surface, const Window& window, float scale) { CudaEvent start, end; dim3 threads(8, 8); dim3 blocks(640 / threads.x, 480 / threads.y); start.record(); oneFloatChannelToWindowContentKernel << <blocks, threads >> > (surface, window.get_content(), scale); end.record(); end.synchronize(); return CudaEvent::calculateElapsedTime(start, end); } float fourFloatChannelToWindowContent(hipSurfaceObject_t surface, const Window& window, float scale) { CudaEvent start, end; dim3 threads(8, 8); dim3 blocks(640 / threads.x, 480 / threads.y); start.record(); fourFloatChannelToWindowContentKernel << <blocks, threads >> > (surface, window.get_content(), scale); end.record(); end.synchronize(); return CudaEvent::calculateElapsedTime(start, end); } float normalMapToWindowContent(hipSurfaceObject_t normal_map, const Window& window, const glm::mat3& inverse_sensor_rotation) { CudaEvent start, end; dim3 threads(8, 8); dim3 blocks(640 / threads.x, 480 / threads.y); start.record(); normalMapToWindowContentKernel << <blocks, threads >> > (normal_map, window.get_content(), inverse_sensor_rotation); end.record(); end.synchronize(); return CudaEvent::calculateElapsedTime(start, end); } float shadingToWindowContent(hipSurfaceObject_t normal_map, const Window& window, const Sensor& sensor) { CudaEvent start, end; dim3 threads(8, 8); dim3 blocks(640 / threads.x, 480 / threads.y); start.record(); shadingToWindowContentKernel << <blocks, threads >> > (normal_map, window.get_content(), sensor); end.record(); end.synchronize(); return CudaEvent::calculateElapsedTime(start, end); } }
d81441bfe1065614500ffa38ae9d6f7ffbbb6eee.cu
#include "display.cuh" #include "device_helper.cuh" #include "glm_macro.h" #include "cuda_event.h" #include <cuda_fp16.h> #include <glm/vec3.hpp> __global__ void oneHalfChannelToWindowContentKernel(cudaSurfaceObject_t surface, cudaSurfaceObject_t window, float scale) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; unsigned short h_pixel; surf2Dread(&h_pixel, surface, i * 2, j, cudaBoundaryModeZero); auto pixel = static_cast<unsigned char>(__half2float(h_pixel) * scale); unsigned int pixel_w = (255) << 8; pixel_w = (pixel_w | pixel) << 8; pixel_w = (pixel_w | pixel) << 8; pixel_w = (pixel_w | pixel); surf2Dwrite(pixel_w, window, i * 4, j); } __global__ void oneFloatChannelToWindowContentKernel(cudaSurfaceObject_t surface, cudaSurfaceObject_t window, float scale) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; float f_pixel; surf2Dread(&f_pixel, surface, i * 4, j, cudaBoundaryModeZero); auto pixel = static_cast<unsigned char>(f_pixel * scale); unsigned int pixel_w = (255) << 8; pixel_w = (pixel_w | pixel) << 8; pixel_w = (pixel_w | pixel) << 8; pixel_w = (pixel_w | pixel); surf2Dwrite(pixel_w, window, i * 4, j); } __global__ void fourFloatChannelToWindowContentKernel(cudaSurfaceObject_t surface, cudaSurfaceObject_t window, float scale) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; float r, g, b; int idx = i * 16; surf2Dread(&r, surface, idx, j, cudaBoundaryModeZero); surf2Dread(&g, surface, idx + 4, j, cudaBoundaryModeZero); surf2Dread(&b, surface, idx + 8, j, cudaBoundaryModeZero); unsigned int pixel_w = (255) << 8; pixel_w = (pixel_w | static_cast<unsigned char>(b * scale)) << 8; pixel_w = (pixel_w | static_cast<unsigned char>(g * scale)) << 8; pixel_w = (pixel_w | static_cast<unsigned char>(r * scale)); surf2Dwrite(pixel_w, window, i * 4, j); } __global__ void normalMapToWindowContentKernel(cudaSurfaceObject_t normal_map, cudaSurfaceObject_t window, glm::mat3 inverse_sensor_rotation) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; glm::vec3 normal; int idx = i * 16; surf2Dread(&normal.x, normal_map, idx, j, cudaBoundaryModeZero); surf2Dread(&normal.y, normal_map, idx + 4, j, cudaBoundaryModeZero); surf2Dread(&normal.z, normal_map, idx + 8, j, cudaBoundaryModeZero); normal = glm::normalize(inverse_sensor_rotation * normal); unsigned int pixel_w = (255) << 8; pixel_w = (pixel_w | static_cast<unsigned char>(normal.z * 255.0f)) << 8; pixel_w = (pixel_w | static_cast<unsigned char>(normal.y * 255.0f)) << 8; pixel_w = (pixel_w | static_cast<unsigned char>(normal.x * 255.0f)); surf2Dwrite(pixel_w, window, i * 4, j); } __global__ void shadingToWindowContentKernel(cudaSurfaceObject_t normal_map, cudaSurfaceObject_t window, Sensor sensor) { int i = threadIdx.x + blockIdx.x * blockDim.x; int j = threadIdx.y + blockIdx.y * blockDim.y; glm::vec3 normal; int idx = i * 16; surf2Dread(&normal.x, normal_map, idx, j, cudaBoundaryModeZero); surf2Dread(&normal.y, normal_map, idx + 4, j, cudaBoundaryModeZero); surf2Dread(&normal.z, normal_map, idx + 8, j, cudaBoundaryModeZero); auto ray_direction = glm::normalize(glm::mat3(sensor.getPose()) * sensor.getInverseIntr(0) * glm::vec3(i + 0.5f, j + 0.5f, 1.0f)); auto radiance = glm::min(glm::abs(glm::dot(normal, ray_direction)), 1.0f); unsigned int pixel_w = (255) << 8; pixel_w = (pixel_w | static_cast<unsigned char>(radiance * 255.0f)) << 8; pixel_w = (pixel_w | 
static_cast<unsigned char>(radiance * 255.0f)) << 8; pixel_w = (pixel_w | static_cast<unsigned char>(radiance * 255.0f)); surf2Dwrite(pixel_w, window, i * 4, j); } namespace kernel { float oneHalfChannelToWindowContent(cudaSurfaceObject_t surface, const Window& window, float scale) { CudaEvent start, end; dim3 threads(8, 8); dim3 blocks(640 / threads.x, 480 / threads.y); start.record(); oneHalfChannelToWindowContentKernel << <blocks, threads >> > (surface, window.get_content(), scale); end.record(); end.synchronize(); return CudaEvent::calculateElapsedTime(start, end); } float oneFloatChannelToWindowContent(cudaSurfaceObject_t surface, const Window& window, float scale) { CudaEvent start, end; dim3 threads(8, 8); dim3 blocks(640 / threads.x, 480 / threads.y); start.record(); oneFloatChannelToWindowContentKernel << <blocks, threads >> > (surface, window.get_content(), scale); end.record(); end.synchronize(); return CudaEvent::calculateElapsedTime(start, end); } float fourFloatChannelToWindowContent(cudaSurfaceObject_t surface, const Window& window, float scale) { CudaEvent start, end; dim3 threads(8, 8); dim3 blocks(640 / threads.x, 480 / threads.y); start.record(); fourFloatChannelToWindowContentKernel << <blocks, threads >> > (surface, window.get_content(), scale); end.record(); end.synchronize(); return CudaEvent::calculateElapsedTime(start, end); } float normalMapToWindowContent(cudaSurfaceObject_t normal_map, const Window& window, const glm::mat3& inverse_sensor_rotation) { CudaEvent start, end; dim3 threads(8, 8); dim3 blocks(640 / threads.x, 480 / threads.y); start.record(); normalMapToWindowContentKernel << <blocks, threads >> > (normal_map, window.get_content(), inverse_sensor_rotation); end.record(); end.synchronize(); return CudaEvent::calculateElapsedTime(start, end); } float shadingToWindowContent(cudaSurfaceObject_t normal_map, const Window& window, const Sensor& sensor) { CudaEvent start, end; dim3 threads(8, 8); dim3 blocks(640 / threads.x, 480 / threads.y); start.record(); shadingToWindowContentKernel << <blocks, threads >> > (normal_map, window.get_content(), sensor); end.record(); end.synchronize(); return CudaEvent::calculateElapsedTime(start, end); } }
c82ddf9cfa45761737e8ed71a0cec960ef20da90.hip
// !!! This is a file automatically generated by hipify!!! #include <cassert> #include <cfloat> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; #define mm_BLOCK_SIZE 16 //#define mm_SUPER_BLOCKS_PER_SM 4 //int mm_SUPER_BLOCKS_PER_SM = 4; #define iSizeMultiple 2 //must be multipes of 15 #define WA (4 * mm_BLOCK_SIZE) // Matrix A width #define HA (4 * mm_BLOCK_SIZE) // Matrix A height //#define WB (mm_SUPER_BLOCKS_PER_SM * mm_BLOCK_SIZE) // Matrix B width #define WB (60 * mm_BLOCK_SIZE) // Matrix B width #define HB WA // Matrix B height #define WC WB // Matrix C width #define HC HA // Matrix C height #define mm_GRID_X (WC*iSizeMultiple/mm_BLOCK_SIZE) #define mm_GRID_Y (HC*iSizeMultiple/mm_BLOCK_SIZE) #define mm_NBLOCKS (mm_GRID_X*mm_GRID_Y) #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] texture<float,2,hipReadModeElementType> tex_A; texture<float,2,hipReadModeElementType> tex_B; surface<void,1> sur_A; surface<void,1> sur_B; surface<void,2> sur_C; void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = rand() / (float)RAND_MAX; } void computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB) { for (unsigned int i = 0; i < hA; ++i) for (unsigned int j = 0; j < wB; ++j) { double sum = 0; for (unsigned int k = 0; k < wA; ++k) { double a = A[i * wA + k]; double b = B[k * wB + j]; sum += a * b; } C[i * wB + j] = (float)sum; } } __global__ void mm_kernel( float* C, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * mm_BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = mm_BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = mm_BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = mm_BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[mm_BLOCK_SIZE][mm_BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[mm_BLOCK_SIZE][mm_BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix surf1Dread(&AS(ty,tx),sur_A,int((a+wA*ty+tx)*4),hipBoundaryModeTrap);// tex2D(tex_A,(a+wA*ty+tx)%wA,(a+wA*ty+tx)/wA);//A[a + wA * ty + tx]; surf1Dread(&BS(ty,tx),sur_B,(b+wB*ty+tx)*4,hipBoundaryModeTrap);//tex2D(tex_B,(b+wB*ty+tx)%wB,(b+wB*ty+tx)/wB);//B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < mm_BLOCK_SIZE; ++k) Csub += AS(ty, k) * BS(k, tx); // Synchronize to make sure that the preceding // 
computation is done before loading two new // sub-matrices of A and B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * mm_BLOCK_SIZE * by + mm_BLOCK_SIZE * bx; // C[c + wB * ty + tx] = Csub; surf2Dwrite(Csub,sur_C,(c + wB * ty + tx)%wB*4,(c + wB * ty + tx)/wB,hipBoundaryModeTrap); //if (threadIdx.x==0&&threadIdx.y==0) atomicAdd(d_flag,1); } int main(int argc, char **argv) { // hipSetDevice(1); srand(2013); unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC; uiWA = WA * iSizeMultiple; uiHA = HA * iSizeMultiple; uiWB = WB * iSizeMultiple; uiHB = HB * iSizeMultiple; uiWC = WC * iSizeMultiple; uiHC = HC * iSizeMultiple; // allocate host memory for matrices A and B unsigned int size_A = uiWA * uiHA; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*)malloc(mem_size_A); unsigned int size_B = uiWB * uiHB; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*)malloc(mem_size_B); // initialize host memory randomInit(h_A, size_A); randomInit(h_B, size_B); // allocate device memory float* d_A, *d_B, *d_C; unsigned int size_C = uiWC * uiHC; unsigned int mem_size_C = sizeof(float) * size_C; printf("size A = %d bytes,size B=%d bytes,size C=%d bytes\n",mem_size_A,mem_size_B,mem_size_C); // allocate host memory for the result float* h_C = (float*) malloc(mem_size_C); float* h_CUBLAS = (float*) malloc(mem_size_C); checkCudaErrors(hipMalloc((void**) &d_A, mem_size_A)); checkCudaErrors(hipMalloc((void**) &d_B, mem_size_B)); hipChannelFormatDesc channelDescA = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);//hipCreateChannelDesc<float>(); hipChannelFormatDesc channelDescB = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);//hipCreateChannelDesc<float>(); hipChannelFormatDesc channelDescC = hipCreateChannelDesc(32, 0, 0, 0, hipChannelFormatKindFloat);//hipCreateChannelDesc<float>(); hipArray* A_Array, *B_Array,*C_Array; hipMallocArray(&A_Array, &channelDescA, uiWA* uiHA,1,hipArraySurfaceLoadStore); hipMallocArray(&B_Array, &channelDescB, 262148/4,1,hipArraySurfaceLoadStore); hipMallocArray(&C_Array, &channelDescC, uiWC, uiHC,hipArraySurfaceLoadStore); // Copy to device memory some data located at address h_data // in host memory hipMemcpyToArray(A_Array, 0, 0, h_A, uiWA * uiHA * sizeof(float), hipMemcpyHostToDevice); hipMemcpyToArray(B_Array, 0, 0, h_B, 262148, hipMemcpyHostToDevice); hipBindSurfaceToArray(sur_A,A_Array,channelDescA); hipBindSurfaceToArray(sur_B,B_Array,channelDescB); hipBindSurfaceToArray(sur_C,C_Array,channelDescC); // Set texture reference parameters /* tex_A.addressMode[0] = hipAddressModeWrap; tex_A.addressMode[1] = hipAddressModeWrap; tex_A.filterMode = hipFilterModePoint; tex_B.addressMode[0] = hipAddressModeWrap; tex_B.addressMode[1] = hipAddressModeWrap; tex_B.filterMode = hipFilterModePoint; // Bind the array to the texture reference hipBindTextureToArray(tex_A, A_Array, channelDescA); hipBindTextureToArray(tex_B, B_Array, channelDescB); */ // copy host memory to device //checkCudaErrors(hipMemcpy(d_A, h_A, mem_size_A, hipMemcpyHostToDevice) ); //checkCudaErrors(hipMemcpy(d_B, h_B, mem_size_B, hipMemcpyHostToDevice) ); checkCudaErrors(hipMalloc((void**) &d_C, mem_size_C)); hipEvent_t kernel_start, kernel_stop; hipEventCreate(&kernel_start); hipEventCreate(&kernel_stop); float kernel_time = 0.0f; hipEventRecord(kernel_start, 0); // setup execution parameters dim3 mm_grid(mm_GRID_X, mm_GRID_Y); dim3 mm_block(mm_BLOCK_SIZE, mm_BLOCK_SIZE); // 
int mm_grid=mm_GRID_X*mm_GRID_Y; // mm_kernel<<< mm_grid, mm_block>>>(d_C, uiWA, uiWB); hipDeviceSynchronize(); hipEventRecord(kernel_stop, 0); hipEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; hipEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; // copy result from device to host // checkCudaErrors(hipMemcpy(h_C, d_C, mem_size_C, hipMemcpyDeviceToHost) ); hipMemcpyFromArray(h_C,C_Array,0,0,uiWC*uiHC*sizeof(float),hipMemcpyDeviceToHost); // compute reference solution float* reference = (float*)malloc(mem_size_C); computeGold(reference, h_A, h_B, uiHA, uiWA, uiWB); // check result (matrixMul) bool resCUDA = sdkCompareL2fe(reference, h_C, size_C, 1.0e-6f); printf("CUDA matrixMul compares %s\n\n", (true == resCUDA) ? "passed" : "FAIL"); // ofstream f1("mm_correct.txt"); // for(int i=0; i<size_C; ++i) // f1 << reference[i] << endl; // f1.close(); // // ofstream f2("mm_gpu.txt"); // for(int i=0; i<size_C; ++i) // f2 << h_C[i] << endl; // f2.close(); // clean up memory free(h_A); free(h_B); free(h_C); free(reference); checkCudaErrors(hipFree(d_A)); checkCudaErrors(hipFree(d_B)); checkCudaErrors(hipFree(d_C)); return 0; }
c82ddf9cfa45761737e8ed71a0cec960ef20da90.cu
#include <cassert> #include <cfloat> #include <cuda_runtime_api.h> #include <cuda.h> #include <iostream> #include <stdio.h> #include <list> #include <map> #include <math.h> #include <stdlib.h> #include <vector> #include <set> #include <algorithm> #include <iterator> #include <fstream> #include "../include/common.h" #define K 1 using namespace std; #define mm_BLOCK_SIZE 16 //#define mm_SUPER_BLOCKS_PER_SM 4 //int mm_SUPER_BLOCKS_PER_SM = 4; #define iSizeMultiple 2 //must be multipes of 15 #define WA (4 * mm_BLOCK_SIZE) // Matrix A width #define HA (4 * mm_BLOCK_SIZE) // Matrix A height //#define WB (mm_SUPER_BLOCKS_PER_SM * mm_BLOCK_SIZE) // Matrix B width #define WB (60 * mm_BLOCK_SIZE) // Matrix B width #define HB WA // Matrix B height #define WC WB // Matrix C width #define HC HA // Matrix C height #define mm_GRID_X (WC*iSizeMultiple/mm_BLOCK_SIZE) #define mm_GRID_Y (HC*iSizeMultiple/mm_BLOCK_SIZE) #define mm_NBLOCKS (mm_GRID_X*mm_GRID_Y) #define AS(i, j) As[i][j] #define BS(i, j) Bs[i][j] texture<float,2,cudaReadModeElementType> tex_A; texture<float,2,cudaReadModeElementType> tex_B; surface<void,1> sur_A; surface<void,1> sur_B; surface<void,2> sur_C; void randomInit(float* data, int size) { for (int i = 0; i < size; ++i) data[i] = rand() / (float)RAND_MAX; } void computeGold(float* C, const float* A, const float* B, unsigned int hA, unsigned int wA, unsigned int wB) { for (unsigned int i = 0; i < hA; ++i) for (unsigned int j = 0; j < wB; ++j) { double sum = 0; for (unsigned int k = 0; k < wA; ++k) { double a = A[i * wA + k]; double b = B[k * wB + j]; sum += a * b; } C[i * wB + j] = (float)sum; } } __global__ void mm_kernel( float* C, int wA, int wB) { // Block index int bx = blockIdx.x; int by = blockIdx.y; // Thread index int tx = threadIdx.x; int ty = threadIdx.y; // Index of the first sub-matrix of A processed by the block int aBegin = wA * mm_BLOCK_SIZE * by; // Index of the last sub-matrix of A processed by the block int aEnd = aBegin + wA - 1; // Step size used to iterate through the sub-matrices of A int aStep = mm_BLOCK_SIZE; // Index of the first sub-matrix of B processed by the block int bBegin = mm_BLOCK_SIZE * bx; // Step size used to iterate through the sub-matrices of B int bStep = mm_BLOCK_SIZE * wB; // Csub is used to store the element of the block sub-matrix // that is computed by the thread float Csub = 0; // Loop over all the sub-matrices of A and B // required to compute the block sub-matrix for (int a = aBegin, b = bBegin; a <= aEnd; a += aStep, b += bStep) { // Declaration of the shared memory array As used to // store the sub-matrix of A __shared__ float As[mm_BLOCK_SIZE][mm_BLOCK_SIZE]; // Declaration of the shared memory array Bs used to // store the sub-matrix of B __shared__ float Bs[mm_BLOCK_SIZE][mm_BLOCK_SIZE]; // Load the matrices from device memory // to shared memory; each thread loads // one element of each matrix surf1Dread(&AS(ty,tx),sur_A,int((a+wA*ty+tx)*4),cudaBoundaryModeTrap);// tex2D(tex_A,(a+wA*ty+tx)%wA,(a+wA*ty+tx)/wA);//A[a + wA * ty + tx]; surf1Dread(&BS(ty,tx),sur_B,(b+wB*ty+tx)*4,cudaBoundaryModeTrap);//tex2D(tex_B,(b+wB*ty+tx)%wB,(b+wB*ty+tx)/wB);//B[b + wB * ty + tx]; // Synchronize to make sure the matrices are loaded __syncthreads(); // Multiply the two matrices together; // each thread computes one element // of the block sub-matrix #pragma unroll for (int k = 0; k < mm_BLOCK_SIZE; ++k) Csub += AS(ty, k) * BS(k, tx); // Synchronize to make sure that the preceding // computation is done before loading two new // sub-matrices of A and 
B in the next iteration __syncthreads(); } // Write the block sub-matrix to device memory; // each thread writes one element int c = wB * mm_BLOCK_SIZE * by + mm_BLOCK_SIZE * bx; // C[c + wB * ty + tx] = Csub; surf2Dwrite(Csub,sur_C,(c + wB * ty + tx)%wB*4,(c + wB * ty + tx)/wB,cudaBoundaryModeTrap); //if (threadIdx.x==0&&threadIdx.y==0) atomicAdd(d_flag,1); } int main(int argc, char **argv) { // cudaSetDevice(1); srand(2013); unsigned int uiWA, uiHA, uiWB, uiHB, uiWC, uiHC; uiWA = WA * iSizeMultiple; uiHA = HA * iSizeMultiple; uiWB = WB * iSizeMultiple; uiHB = HB * iSizeMultiple; uiWC = WC * iSizeMultiple; uiHC = HC * iSizeMultiple; // allocate host memory for matrices A and B unsigned int size_A = uiWA * uiHA; unsigned int mem_size_A = sizeof(float) * size_A; float* h_A = (float*)malloc(mem_size_A); unsigned int size_B = uiWB * uiHB; unsigned int mem_size_B = sizeof(float) * size_B; float* h_B = (float*)malloc(mem_size_B); // initialize host memory randomInit(h_A, size_A); randomInit(h_B, size_B); // allocate device memory float* d_A, *d_B, *d_C; unsigned int size_C = uiWC * uiHC; unsigned int mem_size_C = sizeof(float) * size_C; printf("size A = %d bytes,size B=%d bytes,size C=%d bytes\n",mem_size_A,mem_size_B,mem_size_C); // allocate host memory for the result float* h_C = (float*) malloc(mem_size_C); float* h_CUBLAS = (float*) malloc(mem_size_C); checkCudaErrors(cudaMalloc((void**) &d_A, mem_size_A)); checkCudaErrors(cudaMalloc((void**) &d_B, mem_size_B)); cudaChannelFormatDesc channelDescA = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);//cudaCreateChannelDesc<float>(); cudaChannelFormatDesc channelDescB = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);//cudaCreateChannelDesc<float>(); cudaChannelFormatDesc channelDescC = cudaCreateChannelDesc(32, 0, 0, 0, cudaChannelFormatKindFloat);//cudaCreateChannelDesc<float>(); cudaArray* A_Array, *B_Array,*C_Array; cudaMallocArray(&A_Array, &channelDescA, uiWA* uiHA,1,cudaArraySurfaceLoadStore); cudaMallocArray(&B_Array, &channelDescB, 262148/4,1,cudaArraySurfaceLoadStore); cudaMallocArray(&C_Array, &channelDescC, uiWC, uiHC,cudaArraySurfaceLoadStore); // Copy to device memory some data located at address h_data // in host memory cudaMemcpyToArray(A_Array, 0, 0, h_A, uiWA * uiHA * sizeof(float), cudaMemcpyHostToDevice); cudaMemcpyToArray(B_Array, 0, 0, h_B, 262148, cudaMemcpyHostToDevice); cudaBindSurfaceToArray(sur_A,A_Array,channelDescA); cudaBindSurfaceToArray(sur_B,B_Array,channelDescB); cudaBindSurfaceToArray(sur_C,C_Array,channelDescC); // Set texture reference parameters /* tex_A.addressMode[0] = cudaAddressModeWrap; tex_A.addressMode[1] = cudaAddressModeWrap; tex_A.filterMode = cudaFilterModePoint; tex_B.addressMode[0] = cudaAddressModeWrap; tex_B.addressMode[1] = cudaAddressModeWrap; tex_B.filterMode = cudaFilterModePoint; // Bind the array to the texture reference cudaBindTextureToArray(tex_A, A_Array, channelDescA); cudaBindTextureToArray(tex_B, B_Array, channelDescB); */ // copy host memory to device //checkCudaErrors(cudaMemcpy(d_A, h_A, mem_size_A, cudaMemcpyHostToDevice) ); //checkCudaErrors(cudaMemcpy(d_B, h_B, mem_size_B, cudaMemcpyHostToDevice) ); checkCudaErrors(cudaMalloc((void**) &d_C, mem_size_C)); cudaEvent_t kernel_start, kernel_stop; cudaEventCreate(&kernel_start); cudaEventCreate(&kernel_stop); float kernel_time = 0.0f; cudaEventRecord(kernel_start, 0); // setup execution parameters dim3 mm_grid(mm_GRID_X, mm_GRID_Y); dim3 mm_block(mm_BLOCK_SIZE, mm_BLOCK_SIZE); // int 
mm_grid=mm_GRID_X*mm_GRID_Y; mm_kernel<<< mm_grid, mm_block>>>(d_C, uiWA, uiWB); cudaDeviceSynchronize(); cudaEventRecord(kernel_stop, 0); cudaEventSynchronize(kernel_stop); // get elapsed time kernel_time = 0.0f; cudaEventElapsedTime(&kernel_time, kernel_start, kernel_stop); kernel_time *= 1.e-3; // Convert to seconds cout << "kernel exe time: " << kernel_time << endl; // copy result from device to host // checkCudaErrors(cudaMemcpy(h_C, d_C, mem_size_C, cudaMemcpyDeviceToHost) ); cudaMemcpyFromArray(h_C,C_Array,0,0,uiWC*uiHC*sizeof(float),cudaMemcpyDeviceToHost); // compute reference solution float* reference = (float*)malloc(mem_size_C); computeGold(reference, h_A, h_B, uiHA, uiWA, uiWB); // check result (matrixMul) bool resCUDA = sdkCompareL2fe(reference, h_C, size_C, 1.0e-6f); printf("CUDA matrixMul compares %s\n\n", (true == resCUDA) ? "passed" : "FAIL"); // ofstream f1("mm_correct.txt"); // for(int i=0; i<size_C; ++i) // f1 << reference[i] << endl; // f1.close(); // // ofstream f2("mm_gpu.txt"); // for(int i=0; i<size_C; ++i) // f2 << h_C[i] << endl; // f2.close(); // clean up memory free(h_A); free(h_B); free(h_C); free(reference); checkCudaErrors(cudaFree(d_A)); checkCudaErrors(cudaFree(d_B)); checkCudaErrors(cudaFree(d_C)); return 0; }
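The matrix-multiply benchmark above reads its operands through texture/surface references bound to cudaArrays, an API that recent CUDA toolkits deprecate in favour of surface objects. The sketch below shows the same read-modify-write pattern with cudaSurfaceObject_t; it is a minimal illustration, not part of the benchmark, and names such as scale_surface are placeholders.

#include <cstdio>
#include <cuda_runtime.h>

// Reads one float from an input surface and writes twice its value to an
// output surface. The x coordinate of surf2Dread/surf2Dwrite is a byte offset.
__global__ void scale_surface(cudaSurfaceObject_t in, cudaSurfaceObject_t out,
                              int width, int height) {
  int x = blockIdx.x * blockDim.x + threadIdx.x;
  int y = blockIdx.y * blockDim.y + threadIdx.y;
  if (x < width && y < height) {
    float v;
    surf2Dread(&v, in, x * sizeof(float), y, cudaBoundaryModeTrap);
    surf2Dwrite(2.0f * v, out, x * sizeof(float), y, cudaBoundaryModeTrap);
  }
}

int main() {
  const int W = 64, H = 64;
  float h_in[W * H], h_out[W * H];
  for (int i = 0; i < W * H; ++i) h_in[i] = (float)i;

  // Arrays must be created with cudaArraySurfaceLoadStore to allow writes.
  cudaChannelFormatDesc desc = cudaCreateChannelDesc<float>();
  cudaArray_t a_in, a_out;
  cudaMallocArray(&a_in, &desc, W, H, cudaArraySurfaceLoadStore);
  cudaMallocArray(&a_out, &desc, W, H, cudaArraySurfaceLoadStore);
  cudaMemcpy2DToArray(a_in, 0, 0, h_in, W * sizeof(float),
                      W * sizeof(float), H, cudaMemcpyHostToDevice);

  // Surface objects replace the sur_A/sur_B/sur_C surface references.
  cudaResourceDesc r_in = {}, r_out = {};
  r_in.resType = cudaResourceTypeArray;  r_in.res.array.array = a_in;
  r_out.resType = cudaResourceTypeArray; r_out.res.array.array = a_out;
  cudaSurfaceObject_t s_in, s_out;
  cudaCreateSurfaceObject(&s_in, &r_in);
  cudaCreateSurfaceObject(&s_out, &r_out);

  dim3 block(16, 16), grid((W + 15) / 16, (H + 15) / 16);
  scale_surface<<<grid, block>>>(s_in, s_out, W, H);
  cudaMemcpy2DFromArray(h_out, W * sizeof(float), a_out, 0, 0,
                        W * sizeof(float), H, cudaMemcpyDeviceToHost);
  printf("h_out[5] = %.1f (expected %.1f)\n", h_out[5], 2.0f * h_in[5]);

  cudaDestroySurfaceObject(s_in);
  cudaDestroySurfaceObject(s_out);
  cudaFreeArray(a_in);
  cudaFreeArray(a_out);
  return 0;
}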
0a776409e78a3f81dadec87d55fb52fdeed06d5f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <map> // NOLINT #include "gtest/gtest.h" #include "paddle/phi/api/include/tensor.h" #include "paddle/phi/api/lib/utils/allocator.h" #include "paddle/phi/backends/context_pool.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/common/complex.h" #include "paddle/phi/common/float16.h" #include "paddle/phi/common/place.h" #include "paddle/phi/common/scalar.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/kernel_registry.h" namespace phi { namespace tests { using DDim = phi::DDim; using float16 = phi::dtype::float16; using complex64 = ::phi::dtype::complex<float>; using complex128 = ::phi::dtype::complex<double>; __global__ void FillTensor(float* data) { data[0] = 1; } TEST(Scalar, ConstructFromDenseTensor1) { // 1. create tensor const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(phi::CPUPlace()); phi::DenseTensor dense_x( alloc.get(), phi::DenseTensorMeta( phi::DataType::FLOAT16, phi::make_ddim({1}), phi::DataLayout::NCHW)); phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance(); auto* dev_ctx = reinterpret_cast<phi::CPUContext*>(pool.Get(phi::CPUPlace())); auto* dense_x_data = dev_ctx->Alloc<float16>(&dense_x); dense_x_data[0] = 1; phi::Scalar scalar_test(dense_x); ASSERT_NEAR(1, scalar_test.to<float16>(), 1e-6); } TEST(Scalar, ConstructFromDenseTensor2) { // 1. create tensor const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(phi::CPUPlace()); phi::DenseTensor dense_x( alloc.get(), phi::DenseTensorMeta( phi::DataType::INT16, phi::make_ddim({1}), phi::DataLayout::NCHW)); phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance(); auto* dev_ctx = reinterpret_cast<phi::CPUContext*>(pool.Get(phi::CPUPlace())); auto* dense_x_data = dev_ctx->Alloc<int16_t>(&dense_x); dense_x_data[0] = 1; phi::Scalar scalar_test(dense_x); ASSERT_EQ(1, scalar_test.to<int16_t>()); } TEST(Scalar, ConstructFromDenseTensor3) { // 1. create tensor const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(phi::CPUPlace()); phi::DenseTensor dense_x( alloc.get(), phi::DenseTensorMeta( phi::DataType::INT8, phi::make_ddim({1}), phi::DataLayout::NCHW)); phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance(); auto* dev_ctx = reinterpret_cast<phi::CPUContext*>(pool.Get(phi::CPUPlace())); auto* dense_x_data = dev_ctx->Alloc<int8_t>(&dense_x); dense_x_data[0] = 1; phi::Scalar scalar_test(dense_x); ASSERT_EQ(1, scalar_test.to<int8_t>()); } TEST(Scalar, ConstructFromDenseTensor4) { // 1. 
create tensor const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(phi::CPUPlace()); phi::DenseTensor dense_x( alloc.get(), phi::DenseTensorMeta( phi::DataType::BOOL, phi::make_ddim({1}), phi::DataLayout::NCHW)); phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance(); auto* dev_ctx = reinterpret_cast<phi::CPUContext*>(pool.Get(phi::CPUPlace())); auto* dense_x_data = dev_ctx->Alloc<bool>(&dense_x); dense_x_data[0] = true; phi::Scalar scalar_test(dense_x); ASSERT_EQ(true, scalar_test.to<bool>()); } TEST(Scalar, ConstructFromDenseTensor5) { // 1. create tensor const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(phi::CPUPlace()); phi::DenseTensor dense_x(alloc.get(), phi::DenseTensorMeta(phi::DataType::COMPLEX64, phi::make_ddim({1}), phi::DataLayout::NCHW)); phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance(); auto* dev_ctx = reinterpret_cast<phi::CPUContext*>(pool.Get(phi::CPUPlace())); auto* dense_x_data = dev_ctx->Alloc<complex64>(&dense_x); dense_x_data[0] = 1; phi::Scalar scalar_test(dense_x); complex64 expected_value(1, 0); EXPECT_TRUE(expected_value == scalar_test.to<complex64>()); } TEST(Scalar, ConstructFromDenseTensor6) { // 1. create tensor const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(phi::CPUPlace()); phi::DenseTensor dense_x(alloc.get(), phi::DenseTensorMeta(phi::DataType::COMPLEX128, phi::make_ddim({1}), phi::DataLayout::NCHW)); phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance(); auto* dev_ctx = reinterpret_cast<phi::CPUContext*>(pool.Get(phi::CPUPlace())); auto* dense_x_data = dev_ctx->Alloc<complex128>(&dense_x); dense_x_data[0] = 1; phi::Scalar scalar_test(dense_x); complex128 expected_value(1, 0); EXPECT_TRUE(expected_value == scalar_test.to<complex128>()); } TEST(Scalar, ConstructFromDenseTensor7) { // 1. create tensor const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(phi::GPUPlace()); phi::DenseTensor dense_x( alloc.get(), phi::DenseTensorMeta( phi::DataType::FLOAT32, phi::make_ddim({1}), phi::DataLayout::NCHW)); phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance(); auto* dev_ctx = reinterpret_cast<phi::GPUContext*>(pool.Get(phi::GPUPlace())); auto* dense_x_data = dev_ctx->Alloc<float>(&dense_x); hipLaunchKernelGGL(( FillTensor), dim3(1), dim3(1), 0, dev_ctx->stream(), dense_x_data); dev_ctx->Wait(); phi::Scalar scalar_test(dense_x); ASSERT_NEAR(1, scalar_test.to<float>(), 1e-6); } TEST(Scalar, ConstructFromTensor) { // 1. create tensor const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(phi::GPUPlace()); auto dense_x = std::make_shared<phi::DenseTensor>( alloc.get(), phi::DenseTensorMeta( phi::DataType::FLOAT32, phi::make_ddim({1}), phi::DataLayout::NCHW)); phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance(); auto* dev_ctx = reinterpret_cast<phi::GPUContext*>(pool.Get(phi::GPUPlace())); auto* dense_x_data = dev_ctx->Alloc<float>(dense_x.get()); hipLaunchKernelGGL(( FillTensor), dim3(1), dim3(1), 0, dev_ctx->stream(), dense_x_data); dev_ctx->Wait(); paddle::Tensor x(dense_x); paddle::experimental::Scalar scalar_test(x); ASSERT_NEAR(1, scalar_test.to<float>(), 1e-6); } } // namespace tests } // namespace phi
0a776409e78a3f81dadec87d55fb52fdeed06d5f.cu
/* Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <map> // NOLINT #include "gtest/gtest.h" #include "paddle/phi/api/include/tensor.h" #include "paddle/phi/api/lib/utils/allocator.h" #include "paddle/phi/backends/context_pool.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/common/complex.h" #include "paddle/phi/common/float16.h" #include "paddle/phi/common/place.h" #include "paddle/phi/common/scalar.h" #include "paddle/phi/core/dense_tensor.h" #include "paddle/phi/core/kernel_registry.h" namespace phi { namespace tests { using DDim = phi::DDim; using float16 = phi::dtype::float16; using complex64 = ::phi::dtype::complex<float>; using complex128 = ::phi::dtype::complex<double>; __global__ void FillTensor(float* data) { data[0] = 1; } TEST(Scalar, ConstructFromDenseTensor1) { // 1. create tensor const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(phi::CPUPlace()); phi::DenseTensor dense_x( alloc.get(), phi::DenseTensorMeta( phi::DataType::FLOAT16, phi::make_ddim({1}), phi::DataLayout::NCHW)); phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance(); auto* dev_ctx = reinterpret_cast<phi::CPUContext*>(pool.Get(phi::CPUPlace())); auto* dense_x_data = dev_ctx->Alloc<float16>(&dense_x); dense_x_data[0] = 1; phi::Scalar scalar_test(dense_x); ASSERT_NEAR(1, scalar_test.to<float16>(), 1e-6); } TEST(Scalar, ConstructFromDenseTensor2) { // 1. create tensor const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(phi::CPUPlace()); phi::DenseTensor dense_x( alloc.get(), phi::DenseTensorMeta( phi::DataType::INT16, phi::make_ddim({1}), phi::DataLayout::NCHW)); phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance(); auto* dev_ctx = reinterpret_cast<phi::CPUContext*>(pool.Get(phi::CPUPlace())); auto* dense_x_data = dev_ctx->Alloc<int16_t>(&dense_x); dense_x_data[0] = 1; phi::Scalar scalar_test(dense_x); ASSERT_EQ(1, scalar_test.to<int16_t>()); } TEST(Scalar, ConstructFromDenseTensor3) { // 1. create tensor const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(phi::CPUPlace()); phi::DenseTensor dense_x( alloc.get(), phi::DenseTensorMeta( phi::DataType::INT8, phi::make_ddim({1}), phi::DataLayout::NCHW)); phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance(); auto* dev_ctx = reinterpret_cast<phi::CPUContext*>(pool.Get(phi::CPUPlace())); auto* dense_x_data = dev_ctx->Alloc<int8_t>(&dense_x); dense_x_data[0] = 1; phi::Scalar scalar_test(dense_x); ASSERT_EQ(1, scalar_test.to<int8_t>()); } TEST(Scalar, ConstructFromDenseTensor4) { // 1. 
create tensor const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(phi::CPUPlace()); phi::DenseTensor dense_x( alloc.get(), phi::DenseTensorMeta( phi::DataType::BOOL, phi::make_ddim({1}), phi::DataLayout::NCHW)); phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance(); auto* dev_ctx = reinterpret_cast<phi::CPUContext*>(pool.Get(phi::CPUPlace())); auto* dense_x_data = dev_ctx->Alloc<bool>(&dense_x); dense_x_data[0] = true; phi::Scalar scalar_test(dense_x); ASSERT_EQ(true, scalar_test.to<bool>()); } TEST(Scalar, ConstructFromDenseTensor5) { // 1. create tensor const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(phi::CPUPlace()); phi::DenseTensor dense_x(alloc.get(), phi::DenseTensorMeta(phi::DataType::COMPLEX64, phi::make_ddim({1}), phi::DataLayout::NCHW)); phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance(); auto* dev_ctx = reinterpret_cast<phi::CPUContext*>(pool.Get(phi::CPUPlace())); auto* dense_x_data = dev_ctx->Alloc<complex64>(&dense_x); dense_x_data[0] = 1; phi::Scalar scalar_test(dense_x); complex64 expected_value(1, 0); EXPECT_TRUE(expected_value == scalar_test.to<complex64>()); } TEST(Scalar, ConstructFromDenseTensor6) { // 1. create tensor const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(phi::CPUPlace()); phi::DenseTensor dense_x(alloc.get(), phi::DenseTensorMeta(phi::DataType::COMPLEX128, phi::make_ddim({1}), phi::DataLayout::NCHW)); phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance(); auto* dev_ctx = reinterpret_cast<phi::CPUContext*>(pool.Get(phi::CPUPlace())); auto* dense_x_data = dev_ctx->Alloc<complex128>(&dense_x); dense_x_data[0] = 1; phi::Scalar scalar_test(dense_x); complex128 expected_value(1, 0); EXPECT_TRUE(expected_value == scalar_test.to<complex128>()); } TEST(Scalar, ConstructFromDenseTensor7) { // 1. create tensor const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(phi::GPUPlace()); phi::DenseTensor dense_x( alloc.get(), phi::DenseTensorMeta( phi::DataType::FLOAT32, phi::make_ddim({1}), phi::DataLayout::NCHW)); phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance(); auto* dev_ctx = reinterpret_cast<phi::GPUContext*>(pool.Get(phi::GPUPlace())); auto* dense_x_data = dev_ctx->Alloc<float>(&dense_x); FillTensor<<<1, 1, 0, dev_ctx->stream()>>>(dense_x_data); dev_ctx->Wait(); phi::Scalar scalar_test(dense_x); ASSERT_NEAR(1, scalar_test.to<float>(), 1e-6); } TEST(Scalar, ConstructFromTensor) { // 1. create tensor const auto alloc = std::make_unique<paddle::experimental::DefaultAllocator>(phi::GPUPlace()); auto dense_x = std::make_shared<phi::DenseTensor>( alloc.get(), phi::DenseTensorMeta( phi::DataType::FLOAT32, phi::make_ddim({1}), phi::DataLayout::NCHW)); phi::DeviceContextPool& pool = phi::DeviceContextPool::Instance(); auto* dev_ctx = reinterpret_cast<phi::GPUContext*>(pool.Get(phi::GPUPlace())); auto* dense_x_data = dev_ctx->Alloc<float>(dense_x.get()); FillTensor<<<1, 1, 0, dev_ctx->stream()>>>(dense_x_data); dev_ctx->Wait(); paddle::Tensor x(dense_x); paddle::experimental::Scalar scalar_test(x); ASSERT_NEAR(1, scalar_test.to<float>(), 1e-6); } } // namespace tests } // namespace phi
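Both versions of the Scalar test fill a one-element GPU tensor with a tiny FillTensor kernel on the device context's stream and then read the value back through phi::Scalar. Outside the phi framework the same pattern reduces to the sketch below; the names fill_one and d_scalar are assumptions, and the explicit stream synchronisation stands in for dev_ctx->Wait().

#include <cstdio>
#include <cuda_runtime.h>

// Writes a single value into a one-element device buffer, mirroring the
// FillTensor kernel used by the GPU test cases above.
__global__ void fill_one(float* data) { data[0] = 1.0f; }

int main() {
  float* d_scalar = nullptr;
  cudaMalloc(&d_scalar, sizeof(float));

  cudaStream_t stream;
  cudaStreamCreate(&stream);
  fill_one<<<1, 1, 0, stream>>>(d_scalar);
  cudaStreamSynchronize(stream);   // plays the role of dev_ctx->Wait()

  float h_scalar = 0.0f;
  cudaMemcpy(&h_scalar, d_scalar, sizeof(float), cudaMemcpyDeviceToHost);
  printf("scalar = %f\n", h_scalar);   // expected 1.0

  cudaStreamDestroy(stream);
  cudaFree(d_scalar);
  return 0;
}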
14363e750d8d236e31f82c57192b655f1a1173a5.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cstdint> #ifdef WITH_CUDA #include "oneflow/core/common/data_type.h" #include "oneflow/core/framework/framework.h" #include "oneflow/user/kernels/replication_pad_kernels_util.h" #include "oneflow/core/ep/cuda/cuda_stream.h" namespace oneflow { namespace user_op { template<typename IN_T> __global__ void DoCUDAReplicationPad1d(const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 3> index_helper, const int64_t elem_num, const int64_t src_num, const int64_t dest_num, const int64_t y_width, const int64_t x_width, const int64_t pad_left) { DoReplicationPad1d<IN_T>(src, dest, index_helper, elem_num, src_num, dest_num, y_width, x_width, pad_left); }; template<typename IN_T> __global__ void DoCUDAReplicationPad1dGrad(const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 3> index_helper, const int64_t elem_num, const int64_t src_num, const int64_t dest_num, const int64_t dy_width, const int64_t dx_width, const int64_t pad_left) { DoReplicationPad1dGrad<IN_T>(src, dest, index_helper, elem_num, src_num, dest_num, dy_width, dx_width, pad_left); }; template<typename IN_T> __global__ void DoCUDAReplicationPad2d(const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 4> index_helper, const int64_t elem_num, const int64_t src_num, const int64_t dest_num, const int64_t y_height, const int64_t y_width, const int64_t x_height, const int64_t x_width, const int64_t pad_left, const int64_t pad_top) { DoReplicationPad2d<IN_T>(src, dest, index_helper, elem_num, src_num, dest_num, y_height, y_width, x_height, x_width, pad_left, pad_top); }; template<typename IN_T> __global__ void DoCUDAReplicationPad2dGrad(const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 4> index_helper, const int64_t elem_num, const int64_t src_num, const int64_t dest_num, const int64_t dy_height, const int64_t dy_width, const int64_t dx_height, const int64_t dx_width, const int64_t pad_left, const int64_t pad_top) { DoReplicationPad2dGrad<IN_T>(src, dest, index_helper, elem_num, src_num, dest_num, dy_height, dy_width, dx_height, dx_width, pad_left, pad_top); }; template<typename IN_T> struct ReplicationPad1dFunctor<DeviceType::kCUDA, IN_T> final { void operator()(ep::Stream* stream, const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 3>& index_helper, const int64_t n_batch, const int64_t n_channel, const int64_t y_width, const int64_t x_width, const int64_t pad_left) { const int64_t dest_num = n_channel * y_width; const int64_t src_num = n_channel * x_width; const int64_t elem_num = n_batch * dest_num; hipLaunchKernelGGL(( DoCUDAReplicationPad1d<IN_T>), dim3(BlocksNum4ThreadsNum(elem_num)), dim3(kCudaThreadsNumPerBlock), 0, stream->As<ep::CudaStream>()->cuda_stream(), src, dest, index_helper, elem_num, src_num, dest_num, y_width, x_width, pad_left); } }; // float16 implementation template<> void 
ReplicationPad1dFunctor<DeviceType::kCUDA, float16>::operator()( ep::Stream* stream, const float16* src, float16* dest, const NdIndexOffsetHelper<int64_t, 3>& index_helper, const int64_t n_batch, const int64_t n_channel, const int64_t y_width, const int64_t x_width, const int64_t pad_left) { const int64_t dest_num = n_channel * y_width; const int64_t src_num = n_channel * x_width; const int64_t elem_num = n_batch * dest_num; hipLaunchKernelGGL(( DoCUDAReplicationPad1d<half>), dim3(BlocksNum4ThreadsNum(elem_num)), dim3(kCudaThreadsNumPerBlock), 0, stream->As<ep::CudaStream>()->cuda_stream(), reinterpret_cast<const half*>(src), reinterpret_cast<half*>(dest), index_helper, elem_num, src_num, dest_num, y_width, x_width, pad_left); } template<typename IN_T> struct ReplicationPad1dGradFunctor<DeviceType::kCUDA, IN_T> final { void operator()(ep::Stream* stream, const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 3>& index_helper, const int64_t n_batch, const int64_t n_channel, const int64_t dy_width, const int64_t dx_width, const int64_t pad_left) { const int64_t dest_num = n_channel * dx_width; const int64_t src_num = n_channel * dy_width; const int64_t elem_num = n_batch * src_num; hipLaunchKernelGGL(( DoCUDAReplicationPad1dGrad<IN_T>), dim3(BlocksNum4ThreadsNum(elem_num)), dim3(kCudaThreadsNumPerBlock), 0, stream->As<ep::CudaStream>()->cuda_stream(), src, dest, index_helper, elem_num, src_num, dest_num, dy_width, dx_width, pad_left); } }; // float16 implementation template<> void ReplicationPad1dGradFunctor<DeviceType::kCUDA, float16>::operator()( ep::Stream* stream, const float16* src, float16* dest, const NdIndexOffsetHelper<int64_t, 3>& index_helper, const int64_t n_batch, const int64_t n_channel, const int64_t dy_width, const int64_t dx_width, const int64_t pad_left) { const int64_t dest_num = n_channel * dx_width; const int64_t src_num = n_channel * dy_width; const int64_t elem_num = n_batch * src_num; hipLaunchKernelGGL(( DoCUDAReplicationPad1dGrad<half>), dim3(BlocksNum4ThreadsNum(elem_num)), dim3(kCudaThreadsNumPerBlock), 0, stream->As<ep::CudaStream>()->cuda_stream(), reinterpret_cast<const half*>(src), reinterpret_cast<half*>(dest), index_helper, elem_num, src_num, dest_num, dy_width, dx_width, pad_left); } template<typename IN_T> struct ReplicationPad2dFunctor<DeviceType::kCUDA, IN_T> final { void operator()(ep::Stream* stream, const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 4>& index_helper, const int64_t n_batch, const int64_t n_channel, const int64_t y_height, const int64_t y_width, const int64_t x_height, const int64_t x_width, const int64_t pad_left, const int64_t pad_top) { const int64_t dest_num = n_channel * y_height * y_width; const int64_t src_num = n_channel * x_height * x_width; const int64_t elem_num = n_batch * dest_num; hipLaunchKernelGGL(( DoCUDAReplicationPad2d<IN_T>), dim3(BlocksNum4ThreadsNum(elem_num)), dim3(kCudaThreadsNumPerBlock), 0, stream->As<ep::CudaStream>()->cuda_stream(), src, dest, index_helper, elem_num, src_num, dest_num, y_height, y_width, x_height, x_width, pad_left, pad_top); } }; // float16 implementation template<> void ReplicationPad2dFunctor<DeviceType::kCUDA, float16>::operator()( ep::Stream* stream, const float16* src, float16* dest, const NdIndexOffsetHelper<int64_t, 4>& index_helper, const int64_t n_batch, const int64_t n_channel, const int64_t y_height, const int64_t y_width, const int64_t x_height, const int64_t x_width, const int64_t pad_left, const int64_t pad_top) { const int64_t dest_num = n_channel * 
y_height * y_width; const int64_t src_num = n_channel * x_height * x_width; const int64_t elem_num = n_batch * dest_num; hipLaunchKernelGGL(( DoCUDAReplicationPad2d<half>), dim3(BlocksNum4ThreadsNum(elem_num)), dim3(kCudaThreadsNumPerBlock), 0, stream->As<ep::CudaStream>()->cuda_stream(), reinterpret_cast<const half*>(src), reinterpret_cast<half*>(dest), index_helper, elem_num, src_num, dest_num, y_height, y_width, x_height, x_width, pad_left, pad_top); } template<typename IN_T> struct ReplicationPad2dGradFunctor<DeviceType::kCUDA, IN_T> final { void operator()(ep::Stream* stream, const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 4>& index_helper, const int64_t n_batch, const int64_t n_channel, const int64_t dy_height, const int64_t dy_width, const int64_t dx_height, const int64_t dx_width, const int64_t pad_left, const int64_t pad_top) { const int64_t dest_num = n_channel * dx_height * dx_width; const int64_t src_num = n_channel * dy_height * dy_width; const int64_t elem_num = n_batch * src_num; hipLaunchKernelGGL(( DoCUDAReplicationPad2dGrad<IN_T>), dim3(BlocksNum4ThreadsNum(elem_num)), dim3(kCudaThreadsNumPerBlock), 0, stream->As<ep::CudaStream>()->cuda_stream(), src, dest, index_helper, elem_num, src_num, dest_num, dy_height, dy_width, dx_height, dx_width, pad_left, pad_top); } }; // float16 implementation template<> void ReplicationPad2dGradFunctor<DeviceType::kCUDA, float16>::operator()( ep::Stream* stream, const float16* src, float16* dest, const NdIndexOffsetHelper<int64_t, 4>& index_helper, const int64_t n_batch, const int64_t n_channel, const int64_t dy_height, const int64_t dy_width, const int64_t dx_height, const int64_t dx_width, const int64_t pad_left, const int64_t pad_top) { const int64_t dest_num = n_channel * dx_height * dx_width; const int64_t src_num = n_channel * dy_height * dy_width; const int64_t elem_num = n_batch * src_num; hipLaunchKernelGGL(( DoCUDAReplicationPad2dGrad<half>), dim3(BlocksNum4ThreadsNum(elem_num)), dim3(kCudaThreadsNumPerBlock), 0, stream->As<ep::CudaStream>()->cuda_stream(), reinterpret_cast<const half*>(src), reinterpret_cast<half*>(dest), index_helper, elem_num, src_num, dest_num, dy_height, dy_width, dx_height, dx_width, pad_left, pad_top); } OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_REPLICATION_PAD_FUNCTOR, OF_PP_MAKE_TUPLE_SEQ(DeviceType::kCUDA), PADDING_DATA_TYPE_CUDA_SEQ); OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_REPLICATION_PAD_GRAD_FUNCTOR, OF_PP_MAKE_TUPLE_SEQ(DeviceType::kCUDA), PADDING_DATA_TYPE_CUDA_SEQ); } // namespace user_op } // namespace oneflow #endif // WITH_CUDA
14363e750d8d236e31f82c57192b655f1a1173a5.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include <cstdint> #ifdef WITH_CUDA #include "oneflow/core/common/data_type.h" #include "oneflow/core/framework/framework.h" #include "oneflow/user/kernels/replication_pad_kernels_util.h" #include "oneflow/core/ep/cuda/cuda_stream.h" namespace oneflow { namespace user_op { template<typename IN_T> __global__ void DoCUDAReplicationPad1d(const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 3> index_helper, const int64_t elem_num, const int64_t src_num, const int64_t dest_num, const int64_t y_width, const int64_t x_width, const int64_t pad_left) { DoReplicationPad1d<IN_T>(src, dest, index_helper, elem_num, src_num, dest_num, y_width, x_width, pad_left); }; template<typename IN_T> __global__ void DoCUDAReplicationPad1dGrad(const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 3> index_helper, const int64_t elem_num, const int64_t src_num, const int64_t dest_num, const int64_t dy_width, const int64_t dx_width, const int64_t pad_left) { DoReplicationPad1dGrad<IN_T>(src, dest, index_helper, elem_num, src_num, dest_num, dy_width, dx_width, pad_left); }; template<typename IN_T> __global__ void DoCUDAReplicationPad2d(const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 4> index_helper, const int64_t elem_num, const int64_t src_num, const int64_t dest_num, const int64_t y_height, const int64_t y_width, const int64_t x_height, const int64_t x_width, const int64_t pad_left, const int64_t pad_top) { DoReplicationPad2d<IN_T>(src, dest, index_helper, elem_num, src_num, dest_num, y_height, y_width, x_height, x_width, pad_left, pad_top); }; template<typename IN_T> __global__ void DoCUDAReplicationPad2dGrad(const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 4> index_helper, const int64_t elem_num, const int64_t src_num, const int64_t dest_num, const int64_t dy_height, const int64_t dy_width, const int64_t dx_height, const int64_t dx_width, const int64_t pad_left, const int64_t pad_top) { DoReplicationPad2dGrad<IN_T>(src, dest, index_helper, elem_num, src_num, dest_num, dy_height, dy_width, dx_height, dx_width, pad_left, pad_top); }; template<typename IN_T> struct ReplicationPad1dFunctor<DeviceType::kCUDA, IN_T> final { void operator()(ep::Stream* stream, const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 3>& index_helper, const int64_t n_batch, const int64_t n_channel, const int64_t y_width, const int64_t x_width, const int64_t pad_left) { const int64_t dest_num = n_channel * y_width; const int64_t src_num = n_channel * x_width; const int64_t elem_num = n_batch * dest_num; DoCUDAReplicationPad1d<IN_T><<<BlocksNum4ThreadsNum(elem_num), kCudaThreadsNumPerBlock, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>( src, dest, index_helper, elem_num, src_num, dest_num, y_width, x_width, pad_left); } }; // float16 implementation template<> void ReplicationPad1dFunctor<DeviceType::kCUDA, float16>::operator()( ep::Stream* stream, const float16* src, float16* dest, const 
NdIndexOffsetHelper<int64_t, 3>& index_helper, const int64_t n_batch, const int64_t n_channel, const int64_t y_width, const int64_t x_width, const int64_t pad_left) { const int64_t dest_num = n_channel * y_width; const int64_t src_num = n_channel * x_width; const int64_t elem_num = n_batch * dest_num; DoCUDAReplicationPad1d<half><<<BlocksNum4ThreadsNum(elem_num), kCudaThreadsNumPerBlock, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>( reinterpret_cast<const half*>(src), reinterpret_cast<half*>(dest), index_helper, elem_num, src_num, dest_num, y_width, x_width, pad_left); } template<typename IN_T> struct ReplicationPad1dGradFunctor<DeviceType::kCUDA, IN_T> final { void operator()(ep::Stream* stream, const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 3>& index_helper, const int64_t n_batch, const int64_t n_channel, const int64_t dy_width, const int64_t dx_width, const int64_t pad_left) { const int64_t dest_num = n_channel * dx_width; const int64_t src_num = n_channel * dy_width; const int64_t elem_num = n_batch * src_num; DoCUDAReplicationPad1dGrad<IN_T><<<BlocksNum4ThreadsNum(elem_num), kCudaThreadsNumPerBlock, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>( src, dest, index_helper, elem_num, src_num, dest_num, dy_width, dx_width, pad_left); } }; // float16 implementation template<> void ReplicationPad1dGradFunctor<DeviceType::kCUDA, float16>::operator()( ep::Stream* stream, const float16* src, float16* dest, const NdIndexOffsetHelper<int64_t, 3>& index_helper, const int64_t n_batch, const int64_t n_channel, const int64_t dy_width, const int64_t dx_width, const int64_t pad_left) { const int64_t dest_num = n_channel * dx_width; const int64_t src_num = n_channel * dy_width; const int64_t elem_num = n_batch * src_num; DoCUDAReplicationPad1dGrad<half><<<BlocksNum4ThreadsNum(elem_num), kCudaThreadsNumPerBlock, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>( reinterpret_cast<const half*>(src), reinterpret_cast<half*>(dest), index_helper, elem_num, src_num, dest_num, dy_width, dx_width, pad_left); } template<typename IN_T> struct ReplicationPad2dFunctor<DeviceType::kCUDA, IN_T> final { void operator()(ep::Stream* stream, const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 4>& index_helper, const int64_t n_batch, const int64_t n_channel, const int64_t y_height, const int64_t y_width, const int64_t x_height, const int64_t x_width, const int64_t pad_left, const int64_t pad_top) { const int64_t dest_num = n_channel * y_height * y_width; const int64_t src_num = n_channel * x_height * x_width; const int64_t elem_num = n_batch * dest_num; DoCUDAReplicationPad2d<IN_T><<<BlocksNum4ThreadsNum(elem_num), kCudaThreadsNumPerBlock, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>( src, dest, index_helper, elem_num, src_num, dest_num, y_height, y_width, x_height, x_width, pad_left, pad_top); } }; // float16 implementation template<> void ReplicationPad2dFunctor<DeviceType::kCUDA, float16>::operator()( ep::Stream* stream, const float16* src, float16* dest, const NdIndexOffsetHelper<int64_t, 4>& index_helper, const int64_t n_batch, const int64_t n_channel, const int64_t y_height, const int64_t y_width, const int64_t x_height, const int64_t x_width, const int64_t pad_left, const int64_t pad_top) { const int64_t dest_num = n_channel * y_height * y_width; const int64_t src_num = n_channel * x_height * x_width; const int64_t elem_num = n_batch * dest_num; DoCUDAReplicationPad2d<half><<<BlocksNum4ThreadsNum(elem_num), kCudaThreadsNumPerBlock, 0, 
stream->As<ep::CudaStream>()->cuda_stream()>>>( reinterpret_cast<const half*>(src), reinterpret_cast<half*>(dest), index_helper, elem_num, src_num, dest_num, y_height, y_width, x_height, x_width, pad_left, pad_top); } template<typename IN_T> struct ReplicationPad2dGradFunctor<DeviceType::kCUDA, IN_T> final { void operator()(ep::Stream* stream, const IN_T* src, IN_T* dest, const NdIndexOffsetHelper<int64_t, 4>& index_helper, const int64_t n_batch, const int64_t n_channel, const int64_t dy_height, const int64_t dy_width, const int64_t dx_height, const int64_t dx_width, const int64_t pad_left, const int64_t pad_top) { const int64_t dest_num = n_channel * dx_height * dx_width; const int64_t src_num = n_channel * dy_height * dy_width; const int64_t elem_num = n_batch * src_num; DoCUDAReplicationPad2dGrad<IN_T><<<BlocksNum4ThreadsNum(elem_num), kCudaThreadsNumPerBlock, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>( src, dest, index_helper, elem_num, src_num, dest_num, dy_height, dy_width, dx_height, dx_width, pad_left, pad_top); } }; // float16 implementation template<> void ReplicationPad2dGradFunctor<DeviceType::kCUDA, float16>::operator()( ep::Stream* stream, const float16* src, float16* dest, const NdIndexOffsetHelper<int64_t, 4>& index_helper, const int64_t n_batch, const int64_t n_channel, const int64_t dy_height, const int64_t dy_width, const int64_t dx_height, const int64_t dx_width, const int64_t pad_left, const int64_t pad_top) { const int64_t dest_num = n_channel * dx_height * dx_width; const int64_t src_num = n_channel * dy_height * dy_width; const int64_t elem_num = n_batch * src_num; DoCUDAReplicationPad2dGrad<half><<<BlocksNum4ThreadsNum(elem_num), kCudaThreadsNumPerBlock, 0, stream->As<ep::CudaStream>()->cuda_stream()>>>( reinterpret_cast<const half*>(src), reinterpret_cast<half*>(dest), index_helper, elem_num, src_num, dest_num, dy_height, dy_width, dx_height, dx_width, pad_left, pad_top); } OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_REPLICATION_PAD_FUNCTOR, OF_PP_MAKE_TUPLE_SEQ(DeviceType::kCUDA), PADDING_DATA_TYPE_CUDA_SEQ); OF_PP_SEQ_PRODUCT_FOR_EACH_TUPLE(INSTANTIATE_REPLICATION_PAD_GRAD_FUNCTOR, OF_PP_MAKE_TUPLE_SEQ(DeviceType::kCUDA), PADDING_DATA_TYPE_CUDA_SEQ); } // namespace user_op } // namespace oneflow #endif // WITH_CUDA
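The replication-pad kernels above delegate the index arithmetic to the DoReplicationPad1d/2d helpers declared in replication_pad_kernels_util.h. As a rough, self-contained sketch of what 1-D replication ("edge") padding computes, the kernel below simply clamps each output index to the valid input range; it is illustrative only and does not reproduce OneFlow's NdIndexOffsetHelper layout.

#include <cstdio>
#include <cuda_runtime.h>

// Minimal 1-D replication pad for a single row: each output element copies
// the nearest valid input element, so the borders repeat the edge values.
__global__ void replication_pad_1d(const float* src, float* dst,
                                   int in_width, int pad_left, int out_width) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < out_width) {
    int j = i - pad_left;                 // corresponding input index
    j = max(0, min(j, in_width - 1));     // clamp to the valid range
    dst[i] = src[j];
  }
}

int main() {
  const int in_w = 4, pad_l = 2, pad_r = 3, out_w = in_w + pad_l + pad_r;
  float h_src[in_w] = {1.f, 2.f, 3.f, 4.f}, h_dst[out_w];

  float *d_src, *d_dst;
  cudaMalloc(&d_src, in_w * sizeof(float));
  cudaMalloc(&d_dst, out_w * sizeof(float));
  cudaMemcpy(d_src, h_src, in_w * sizeof(float), cudaMemcpyHostToDevice);

  replication_pad_1d<<<1, 32>>>(d_src, d_dst, in_w, pad_l, out_w);
  cudaMemcpy(h_dst, d_dst, out_w * sizeof(float), cudaMemcpyDeviceToHost);

  for (int i = 0; i < out_w; ++i) printf("%.0f ", h_dst[i]);  // 1 1 1 2 3 4 4 4 4
  printf("\n");

  cudaFree(d_src);
  cudaFree(d_dst);
  return 0;
}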
62d8e66ac6d7275e9181fff547ccbd991274d8d8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/lrn_layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void LRNFillScale(const int nthreads, const Dtype* const in, const int num, const int channels, const int height, const int width, const int size, const Dtype alpha_over_size, const Dtype k, Dtype* const scale) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int w = index % width; const int h = (index / width) % height; const int n = index / width / height; const int offset = (n * channels * height + h) * width + w; const int step = height * width; const Dtype* const in_off = in + offset; Dtype* const scale_off = scale + offset; int head = 0; const int pre_pad = (size - 1) / 2; const int post_pad = size - pre_pad - 1; Dtype accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad && head < channels) { accum_scale += in_off[head * step] * in_off[head * step]; ++head; } // both add and subtract while (head < channels) { accum_scale += in_off[head * step] * in_off[head * step]; if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } } } template <typename Dtype> void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelForward_gpu(bottom, top); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: WithinChannelForward(bottom, top); break; default: LOG(FATAL) << "Unknown normalization region."; } } // TODO: check if it would be faster to just put it into the previous kernel. template <typename Dtype> __global__ void LRNComputeOutput(const int nthreads, const Dtype* const in, const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { CUDA_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } } template <typename Dtype> void LRNLayer<Dtype>::CrossChannelForward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // First, compute scale const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); // We will launch one kernel for each pixel location, and have the kernel // go through all the channels. 
int n_threads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( LRNFillScale), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n_threads, bottom_data, num_, channels_, height_, width_, size_, alpha_ / size_, k_, scale_data); CUDA_POST_KERNEL_CHECK; n_threads = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( LRNComputeOutput), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n_threads, bottom_data, scale_data, -beta_, top_data); CUDA_POST_KERNEL_CHECK; } template void LRNLayer<float>::CrossChannelForward_gpu( const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top); template void LRNLayer<double>::CrossChannelForward_gpu( const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top); template <typename Dtype> void LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelBackward_gpu(top, propagate_down, bottom); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: WithinChannelBackward(top, propagate_down, bottom); break; default: LOG(FATAL) << "Unknown normalization region."; } } template <typename Dtype> __global__ void LRNComputeDiff(const int nthreads, const Dtype* const bottom_data, const Dtype* const top_data, const Dtype* const scale, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int size, const Dtype negative_beta, const Dtype cache_ratio, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int w = index % width; const int h = (index / width) % height; const int n = index / width / height; const int offset = (n * channels * height + h) * width + w; const int step = height * width; const Dtype* const bottom_off = bottom_data + offset; const Dtype* const top_off = top_data + offset; const Dtype* const scale_off = scale + offset; const Dtype* const top_diff_off = top_diff + offset; Dtype* const bottom_diff_off = bottom_diff + offset; int head = 0; const int pre_pad = size - (size + 1) / 2; const int post_pad = size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values while (head < post_pad && head < channels) { accum_ratio += top_diff_off[head * step] * top_off[head * step] / scale_off[head * step]; ++head; } // both add and subtract while (head < channels) { accum_ratio += top_diff_off[head * step] * top_off[head * step] / scale_off[head * step]; if (head - size >= 0) { accum_ratio -= top_diff_off[(head - size) * step] * top_off[(head - size) * step] / scale_off[(head - size) * step]; } bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { accum_ratio -= top_diff_off[(head - size) * step] * top_off[(head - size) * step] / scale_off[(head - size) * step]; } bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } } } template <typename Dtype> void LRNLayer<Dtype>::CrossChannelBackward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& 
propagate_down, const vector<Blob<Dtype>*>& bottom) { int n_threads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( LRNComputeDiff), dim3(CAFFE_GET_BLOCKS(n_threads)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(), scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_, size_, -beta_, Dtype(2. * alpha_ * beta_ / size_), bottom[0]->mutable_gpu_diff()); } template void LRNLayer<float>::CrossChannelBackward_gpu( const vector<Blob<float>*>& top, const vector<bool>& propagate_down, const vector<Blob<float>*>& bottom); template void LRNLayer<double>::CrossChannelBackward_gpu( const vector<Blob<double>*>& top, const vector<bool>& propagate_down, const vector<Blob<double>*>& bottom); INSTANTIATE_LAYER_GPU_FUNCS(LRNLayer); } // namespace caffe
62d8e66ac6d7275e9181fff547ccbd991274d8d8.cu
#include <vector> #include "caffe/layers/lrn_layer.hpp" #include "caffe/util/math_functions.hpp" #include "caffe/vision_layers.hpp" namespace caffe { template <typename Dtype> __global__ void LRNFillScale(const int nthreads, const Dtype* const in, const int num, const int channels, const int height, const int width, const int size, const Dtype alpha_over_size, const Dtype k, Dtype* const scale) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int w = index % width; const int h = (index / width) % height; const int n = index / width / height; const int offset = (n * channels * height + h) * width + w; const int step = height * width; const Dtype* const in_off = in + offset; Dtype* const scale_off = scale + offset; int head = 0; const int pre_pad = (size - 1) / 2; const int post_pad = size - pre_pad - 1; Dtype accum_scale = 0; // fill the scale at [n, :, h, w] // accumulate values while (head < post_pad && head < channels) { accum_scale += in_off[head * step] * in_off[head * step]; ++head; } // both add and subtract while (head < channels) { accum_scale += in_off[head * step] * in_off[head * step]; if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { accum_scale -= in_off[(head - size) * step] * in_off[(head - size) * step]; } scale_off[(head - post_pad) * step] = k + accum_scale * alpha_over_size; ++head; } } } template <typename Dtype> void LRNLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelForward_gpu(bottom, top); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: WithinChannelForward(bottom, top); break; default: LOG(FATAL) << "Unknown normalization region."; } } // TODO: check if it would be faster to just put it into the previous kernel. template <typename Dtype> __global__ void LRNComputeOutput(const int nthreads, const Dtype* const in, const Dtype* const scale, const Dtype negative_beta, Dtype* const out) { CUDA_KERNEL_LOOP(index, nthreads) { out[index] = in[index] * pow(scale[index], negative_beta); } } template <typename Dtype> void LRNLayer<Dtype>::CrossChannelForward_gpu( const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { // First, compute scale const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); Dtype* scale_data = scale_.mutable_gpu_data(); // We will launch one kernel for each pixel location, and have the kernel // go through all the channels. 
int n_threads = num_ * height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) LRNFillScale<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>( n_threads, bottom_data, num_, channels_, height_, width_, size_, alpha_ / size_, k_, scale_data); CUDA_POST_KERNEL_CHECK; n_threads = bottom[0]->count(); // NOLINT_NEXT_LINE(whitespace/operators) LRNComputeOutput<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>( n_threads, bottom_data, scale_data, -beta_, top_data); CUDA_POST_KERNEL_CHECK; } template void LRNLayer<float>::CrossChannelForward_gpu( const vector<Blob<float>*>& bottom, const vector<Blob<float>*>& top); template void LRNLayer<double>::CrossChannelForward_gpu( const vector<Blob<double>*>& bottom, const vector<Blob<double>*>& top); template <typename Dtype> void LRNLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { switch (this->layer_param_.lrn_param().norm_region()) { case LRNParameter_NormRegion_ACROSS_CHANNELS: CrossChannelBackward_gpu(top, propagate_down, bottom); break; case LRNParameter_NormRegion_WITHIN_CHANNEL: WithinChannelBackward(top, propagate_down, bottom); break; default: LOG(FATAL) << "Unknown normalization region."; } } template <typename Dtype> __global__ void LRNComputeDiff(const int nthreads, const Dtype* const bottom_data, const Dtype* const top_data, const Dtype* const scale, const Dtype* const top_diff, const int num, const int channels, const int height, const int width, const int size, const Dtype negative_beta, const Dtype cache_ratio, Dtype* const bottom_diff) { CUDA_KERNEL_LOOP(index, nthreads) { // find out the local offset const int w = index % width; const int h = (index / width) % height; const int n = index / width / height; const int offset = (n * channels * height + h) * width + w; const int step = height * width; const Dtype* const bottom_off = bottom_data + offset; const Dtype* const top_off = top_data + offset; const Dtype* const scale_off = scale + offset; const Dtype* const top_diff_off = top_diff + offset; Dtype* const bottom_diff_off = bottom_diff + offset; int head = 0; const int pre_pad = size - (size + 1) / 2; const int post_pad = size - pre_pad - 1; Dtype accum_ratio = 0; // accumulate values while (head < post_pad && head < channels) { accum_ratio += top_diff_off[head * step] * top_off[head * step] / scale_off[head * step]; ++head; } // both add and subtract while (head < channels) { accum_ratio += top_diff_off[head * step] * top_off[head * step] / scale_off[head * step]; if (head - size >= 0) { accum_ratio -= top_diff_off[(head - size) * step] * top_off[(head - size) * step] / scale_off[(head - size) * step]; } bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } // subtract only while (head < channels + post_pad) { if (head - size >= 0) { accum_ratio -= top_diff_off[(head - size) * step] * top_off[(head - size) * step] / scale_off[(head - size) * step]; } bottom_diff_off[(head - post_pad) * step] = top_diff_off[(head - post_pad) * step] * pow(scale_off[(head - post_pad) * step], negative_beta) - cache_ratio * bottom_off[(head - post_pad) * step] * accum_ratio; ++head; } } } template <typename Dtype> void LRNLayer<Dtype>::CrossChannelBackward_gpu( const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { int n_threads = num_ * 
height_ * width_; // NOLINT_NEXT_LINE(whitespace/operators) LRNComputeDiff<<<CAFFE_GET_BLOCKS(n_threads), CAFFE_CUDA_NUM_THREADS>>>( n_threads, bottom[0]->gpu_data(), top[0]->gpu_data(), scale_.gpu_data(), top[0]->gpu_diff(), num_, channels_, height_, width_, size_, -beta_, Dtype(2. * alpha_ * beta_ / size_), bottom[0]->mutable_gpu_diff()); } template void LRNLayer<float>::CrossChannelBackward_gpu( const vector<Blob<float>*>& top, const vector<bool>& propagate_down, const vector<Blob<float>*>& bottom); template void LRNLayer<double>::CrossChannelBackward_gpu( const vector<Blob<double>*>& top, const vector<bool>& propagate_down, const vector<Blob<double>*>& bottom); INSTANTIATE_LAYER_GPU_FUNCS(LRNLayer); } // namespace caffe
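LRNFillScale walks the channel axis with a sliding window so that each squared value is read only a constant number of times. A naive formulation recomputes the window for every output element; the sketch below (the name lrn_naive and the toy sizes in main are assumptions) is meant only to spell out the quantity k + (alpha/size) * sum-of-squares that the Caffe kernel accumulates incrementally.

#include <cstdio>
#include <cuda_runtime.h>

// Naive cross-channel LRN for NCHW data: for each (n, c, h, w) the scale is
// k + (alpha/size) * sum of squares over a window of `size` channels starting
// at c - (size-1)/2, clamped to the valid channel range, and the output is
// in * scale^(-beta).
__global__ void lrn_naive(const float* in, float* out, int num, int channels,
                          int height, int width, int size,
                          float alpha, float beta, float k) {
  int idx = blockIdx.x * blockDim.x + threadIdx.x;
  int total = num * channels * height * width;
  if (idx >= total) return;

  int w = idx % width;
  int h = (idx / width) % height;
  int c = (idx / width / height) % channels;
  int n = idx / width / height / channels;

  int pre_pad = (size - 1) / 2;
  float accum = 0.f;
  for (int cc = max(0, c - pre_pad);
       cc <= min(channels - 1, c - pre_pad + size - 1); ++cc) {
    float v = in[((n * channels + cc) * height + h) * width + w];
    accum += v * v;
  }
  float scale = k + accum * alpha / size;
  out[idx] = in[idx] * powf(scale, -beta);
}

int main() {
  const int N = 1, C = 5, H = 2, W = 2, size = 3;
  const float alpha = 1e-4f, beta = 0.75f, k = 1.f;
  const int count = N * C * H * W;
  float h_in[count], h_out[count];
  for (int i = 0; i < count; ++i) h_in[i] = 0.1f * i;

  float *d_in, *d_out;
  cudaMalloc(&d_in, count * sizeof(float));
  cudaMalloc(&d_out, count * sizeof(float));
  cudaMemcpy(d_in, h_in, count * sizeof(float), cudaMemcpyHostToDevice);

  lrn_naive<<<(count + 255) / 256, 256>>>(d_in, d_out, N, C, H, W,
                                          size, alpha, beta, k);
  cudaMemcpy(h_out, d_out, count * sizeof(float), cudaMemcpyDeviceToHost);
  printf("out[0] = %f\n", h_out[0]);

  cudaFree(d_in);
  cudaFree(d_out);
  return 0;
}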
127b1b73def35a6a955db01356b8dfa2ebfc47c3.hip
// !!! This is a file automatically generated by hipify!!! /* Sample code for Sparse-Matrix-Dense Matrix multiplication.*/ #include <iostream> #include <cstdlib> #include <time.h> #include <hip/hip_runtime.h> #include <minigun/minigun.h> #include "../samples_utils.h" #include "../samples_io.h" struct GData { int32_t dim = 0; float* cur{nullptr}; float* next{nullptr}; float* weight{nullptr}; int* eid_mapping{nullptr}; }; struct SPMMFunctor { static __device__ __forceinline__ void ApplyEdge( int32_t src, int32_t dst, int32_t eid, GData* gdata) {} static __device__ __forceinline__ void ApplyEdgeReduce( int32_t src, int32_t dst, int32_t eid, int32_t feat_idx, float* val, GData* gdata) { *val += gdata->cur[src * gdata->dim + feat_idx] * gdata->weight[gdata->eid_mapping[eid]]; } static __device__ __forceinline__ int32_t GetFeatSize(GData* gdata) { return gdata->dim; } static __device__ __forceinline__ float* GetOutBuf(GData* gdata) { return gdata->next; } static __device__ __forceinline__ int32_t GetOutOffset(int32_t idx, GData* gdata) { return idx; } }; const int32_t D = 128; // number of features std::vector<float> GroundTruth( const std::vector<int32_t>& row_offsets, const std::vector<int32_t>& column_indices, const std::vector<float>& vdata, const std::vector<float>& edata) { std::vector<float> ret(vdata.size(), 0); for (size_t u = 0; u < row_offsets.size() - 1; ++u) { for (int32_t eid = row_offsets[u]; eid < row_offsets[u+1]; ++eid) { int32_t v = column_indices[eid]; for (int32_t idx = 0; idx < D; ++idx) { ret[v * D + idx] += vdata[u * D + idx] * edata[eid]; } } } return ret; } int main(int argc, char** argv) { srand(42); // create graph std::vector<int32_t> row_offsets, column_indices; utils::CreateNPGraph(1000, 0.01, row_offsets, column_indices); const int32_t N = row_offsets.size() - 1; const int32_t M = column_indices.size(); std::cout << "#nodes: " << N << " #edges: " << M << " #feats: " << D << std::endl; // copy graph to gpu CUDA_CALL(hipSetDevice(0)); minigun::IntCsr csr; csr.row_offsets.length = row_offsets.size(); CUDA_CALL(hipMalloc(&csr.row_offsets.data, sizeof(int32_t) * row_offsets.size())); CUDA_CALL(hipMemcpy(csr.row_offsets.data, &row_offsets[0], sizeof(int32_t) * row_offsets.size(), hipMemcpyHostToDevice)); csr.column_indices.length = column_indices.size(); CUDA_CALL(hipMalloc(&csr.column_indices.data, sizeof(int32_t) * column_indices.size())); CUDA_CALL(hipMemcpy(csr.column_indices.data, &column_indices[0], sizeof(int32_t) * column_indices.size(), hipMemcpyHostToDevice)); csr.num_rows = N; csr.num_cols = N; // Create raw eid_mapping minigun::IntArray csr_mapping = utils::arange(0, M, kDLGPU); // Create csr_t and coo minigun::IntCsr csr_t; auto pack = utils::ToReverseCsr(csr, csr_mapping, kDLGPU); csr_t = pack.first; minigun::IntArray csr_t_mapping = pack.second; minigun::IntCoo coo; coo = utils::ToCoo(csr, kDLGPU); minigun::IntSpMat spmat = {&csr, &csr_t, &coo}; // Create stream minigun::advance::RuntimeConfig config; config.ctx = {kDLGPU, 0}; int nt = 1; //utils::_FindNumThreads(D, 32); config.data_num_threads = nt; config.data_num_blocks = 1; CUDA_CALL(hipStreamCreate(&config.stream)); // Create feature data std::vector<float> vvec(N * D), evec(M); for (int32_t i = 0; i < N * D; ++i) { vvec[i] = (float)rand() / RAND_MAX; } for (int32_t i = 0; i < M; ++i) { evec[i] = (float)rand() / RAND_MAX; } // Copy feature data to gpu GData gdata; gdata.dim = D; CUDA_CALL(hipMalloc(&gdata.cur, sizeof(float) * N * D)); CUDA_CALL(hipMemcpy(gdata.cur, &vvec[0], sizeof(float) * N * D, 
hipMemcpyHostToDevice)); CUDA_CALL(hipMalloc(&gdata.next, sizeof(float) * N * D)); CUDA_CALL(hipMemset(gdata.next, 0, sizeof(float) * N * D)); CUDA_CALL(hipMalloc(&gdata.weight, sizeof(float) * M)); CUDA_CALL(hipMemcpy(gdata.weight, &evec[0], sizeof(float) * M, hipMemcpyHostToDevice)); gdata.eid_mapping = csr_t_mapping.data; CUDA_CALL(hipDeviceSynchronize()); // Compute ground truth std::vector<float> truth = GroundTruth(row_offsets, column_indices, vvec, evec); typedef minigun::advance::Config<minigun::advance::kDst> Config; minigun::advance::Advance<kDLGPU, int32_t, float, Config, GData, SPMMFunctor>( config, spmat, &gdata); CUDA_CALL(hipDeviceSynchronize()); // verify output std::vector<float> rst(N * D); CUDA_CALL(hipMemcpy(&rst[0], gdata.next, sizeof(float) * N * D, hipMemcpyDeviceToHost)); std::cout << "Correct? " << utils::VecEqual(truth, rst) << std::endl; // free return 0; }
127b1b73def35a6a955db01356b8dfa2ebfc47c3.cu
/* Sample code for Sparse-Matrix-Dense Matrix multiplication.*/ #include <iostream> #include <cstdlib> #include <time.h> #include <cuda_runtime.h> #include <minigun/minigun.h> #include "../samples_utils.h" #include "../samples_io.h" struct GData { int32_t dim = 0; float* cur{nullptr}; float* next{nullptr}; float* weight{nullptr}; int* eid_mapping{nullptr}; }; struct SPMMFunctor { static __device__ __forceinline__ void ApplyEdge( int32_t src, int32_t dst, int32_t eid, GData* gdata) {} static __device__ __forceinline__ void ApplyEdgeReduce( int32_t src, int32_t dst, int32_t eid, int32_t feat_idx, float* val, GData* gdata) { *val += gdata->cur[src * gdata->dim + feat_idx] * gdata->weight[gdata->eid_mapping[eid]]; } static __device__ __forceinline__ int32_t GetFeatSize(GData* gdata) { return gdata->dim; } static __device__ __forceinline__ float* GetOutBuf(GData* gdata) { return gdata->next; } static __device__ __forceinline__ int32_t GetOutOffset(int32_t idx, GData* gdata) { return idx; } }; const int32_t D = 128; // number of features std::vector<float> GroundTruth( const std::vector<int32_t>& row_offsets, const std::vector<int32_t>& column_indices, const std::vector<float>& vdata, const std::vector<float>& edata) { std::vector<float> ret(vdata.size(), 0); for (size_t u = 0; u < row_offsets.size() - 1; ++u) { for (int32_t eid = row_offsets[u]; eid < row_offsets[u+1]; ++eid) { int32_t v = column_indices[eid]; for (int32_t idx = 0; idx < D; ++idx) { ret[v * D + idx] += vdata[u * D + idx] * edata[eid]; } } } return ret; } int main(int argc, char** argv) { srand(42); // create graph std::vector<int32_t> row_offsets, column_indices; utils::CreateNPGraph(1000, 0.01, row_offsets, column_indices); const int32_t N = row_offsets.size() - 1; const int32_t M = column_indices.size(); std::cout << "#nodes: " << N << " #edges: " << M << " #feats: " << D << std::endl; // copy graph to gpu CUDA_CALL(cudaSetDevice(0)); minigun::IntCsr csr; csr.row_offsets.length = row_offsets.size(); CUDA_CALL(cudaMalloc(&csr.row_offsets.data, sizeof(int32_t) * row_offsets.size())); CUDA_CALL(cudaMemcpy(csr.row_offsets.data, &row_offsets[0], sizeof(int32_t) * row_offsets.size(), cudaMemcpyHostToDevice)); csr.column_indices.length = column_indices.size(); CUDA_CALL(cudaMalloc(&csr.column_indices.data, sizeof(int32_t) * column_indices.size())); CUDA_CALL(cudaMemcpy(csr.column_indices.data, &column_indices[0], sizeof(int32_t) * column_indices.size(), cudaMemcpyHostToDevice)); csr.num_rows = N; csr.num_cols = N; // Create raw eid_mapping minigun::IntArray csr_mapping = utils::arange(0, M, kDLGPU); // Create csr_t and coo minigun::IntCsr csr_t; auto pack = utils::ToReverseCsr(csr, csr_mapping, kDLGPU); csr_t = pack.first; minigun::IntArray csr_t_mapping = pack.second; minigun::IntCoo coo; coo = utils::ToCoo(csr, kDLGPU); minigun::IntSpMat spmat = {&csr, &csr_t, &coo}; // Create stream minigun::advance::RuntimeConfig config; config.ctx = {kDLGPU, 0}; int nt = 1; //utils::_FindNumThreads(D, 32); config.data_num_threads = nt; config.data_num_blocks = 1; CUDA_CALL(cudaStreamCreate(&config.stream)); // Create feature data std::vector<float> vvec(N * D), evec(M); for (int32_t i = 0; i < N * D; ++i) { vvec[i] = (float)rand() / RAND_MAX; } for (int32_t i = 0; i < M; ++i) { evec[i] = (float)rand() / RAND_MAX; } // Copy feature data to gpu GData gdata; gdata.dim = D; CUDA_CALL(cudaMalloc(&gdata.cur, sizeof(float) * N * D)); CUDA_CALL(cudaMemcpy(gdata.cur, &vvec[0], sizeof(float) * N * D, cudaMemcpyHostToDevice)); 
CUDA_CALL(cudaMalloc(&gdata.next, sizeof(float) * N * D)); CUDA_CALL(cudaMemset(gdata.next, 0, sizeof(float) * N * D)); CUDA_CALL(cudaMalloc(&gdata.weight, sizeof(float) * M)); CUDA_CALL(cudaMemcpy(gdata.weight, &evec[0], sizeof(float) * M, cudaMemcpyHostToDevice)); gdata.eid_mapping = csr_t_mapping.data; CUDA_CALL(cudaDeviceSynchronize()); // Compute ground truth std::vector<float> truth = GroundTruth(row_offsets, column_indices, vvec, evec); typedef minigun::advance::Config<minigun::advance::kDst> Config; minigun::advance::Advance<kDLGPU, int32_t, float, Config, GData, SPMMFunctor>( config, spmat, &gdata); CUDA_CALL(cudaDeviceSynchronize()); // verify output std::vector<float> rst(N * D); CUDA_CALL(cudaMemcpy(&rst[0], gdata.next, sizeof(float) * N * D, cudaMemcpyDeviceToHost)); std::cout << "Correct? " << utils::VecEqual(truth, rst) << std::endl; // free return 0; }
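The sample above leans on the CSR encoding (row_offsets/column_indices) produced by utils::CreateNPGraph. Below is a minimal, self-contained host-side sketch — plain C++ with made-up data, independent of minigun — of how CSR stores the edges and how the GroundTruth-style SpMM walks them:

// Minimal CSR + SpMM sketch (illustrative only; not part of the sample above).
#include <cstdio>
#include <vector>

int main() {
  const int D = 2;                                // feature dim, kept tiny for illustration
  // Graph with 3 nodes and edges 0->1, 0->2, 1->2 in CSR form:
  std::vector<int> row_offsets = {0, 2, 3, 3};    // node u's edges live in [row_offsets[u], row_offsets[u+1])
  std::vector<int> column_indices = {1, 2, 2};    // destination node of each edge
  std::vector<float> vdata = {1, 2, 3, 4, 5, 6};  // node features, row-major [3 x D]
  std::vector<float> edata = {0.5f, 1.0f, 2.0f};  // one weight per edge
  std::vector<float> out(vdata.size(), 0.0f);

  for (size_t u = 0; u + 1 < row_offsets.size(); ++u)
    for (int eid = row_offsets[u]; eid < row_offsets[u + 1]; ++eid) {
      int v = column_indices[eid];
      for (int k = 0; k < D; ++k)
        out[v * D + k] += vdata[u * D + k] * edata[eid];   // same update as GroundTruth above
    }

  for (int v = 0; v < 3; ++v)
    printf("node %d: %.2f %.2f\n", v, out[v * D], out[v * D + 1]);
  return 0;
}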
4ed78affe2ded96325bd4c727689677ef00a1fcf.hip
// !!! This is a file automatically generated by hipify!!!
#include "common.h"
#include "allgather.h"

void allgather_h2h(double *send_buf_d, double *recv_buf_d, size_t count,
                   ncclDataType_t dtype, ncclComm_t comm, hipStream_t stream)
{
    int nranks;
    double *send_buf_h = NULL;
    double *recv_buf_h = NULL;

    NCCLCHECK( ncclCommCount(comm, &nranks) );

    send_buf_h = (double*)malloc(sizeof(double) * count);
    recv_buf_h = (double*)malloc(sizeof(double) * count * nranks);

    CUDACHECK( hipMemcpy(send_buf_h, send_buf_d, sizeof(double) * count, hipMemcpyDeviceToHost) );

    NCCLCHECK( ncclAllGather((void*)send_buf_h, (void*)recv_buf_h, count, dtype, comm, stream) );

    /* Copy the gathered result from the host staging buffer back to the device. */
    CUDACHECK( hipMemcpy(recv_buf_d, recv_buf_h, sizeof(double) * count * nranks, hipMemcpyHostToDevice) );

    free(recv_buf_h);
    free(send_buf_h);
}

void allgather_d2d(double *send_buf_d, double *recv_buf_d, size_t count,
                   ncclDataType_t dtype, ncclComm_t comm, hipStream_t stream)
{
    NCCLCHECK( ncclAllGather((void*)send_buf_d, (void*)recv_buf_d, count, dtype, comm, stream) );
}

void allgather_init(const info_t info, double **send_buf_h, double **send_buf_d,
                    double **recv_buf_h, double **recv_buf_d, const size_t count)
{
    int nranks = info.size;

    /* Allocation and initialization of buffers */
    *send_buf_h = (double*)malloc(sizeof(double) * count);
    *recv_buf_h = (double*)malloc(sizeof(double) * count * nranks);
    CUDACHECK( hipMalloc((void**)send_buf_d, sizeof(double) * count) );
    CUDACHECK( hipMalloc((void**)recv_buf_d, sizeof(double) * count * nranks) );

    for (int i=0; i<count; ++i) {
        (*send_buf_h)[i] = i;
    }
    for (int i=0; i<count * nranks; ++i) {
        (*recv_buf_h)[i] = 0;
    }

    /* Host to device */
    CUDACHECK( hipMemcpy(*send_buf_d, *send_buf_h, sizeof(double) * count, hipMemcpyHostToDevice) );
    CUDACHECK( hipMemcpy(*recv_buf_d, *recv_buf_h, sizeof(double) * count * nranks, hipMemcpyHostToDevice) );

    /* Print and barrier */
    for (int i=0; i<info.size; i++) {
        if (info.rank == i) {
            printf("[%d/%d: %s]: B (%.2f, %.2f, %.2f, %.2f, ...)\n",
                   info.rank, info.size, info.hostname,
                   (*send_buf_h)[0], (*send_buf_h)[1], (*send_buf_h)[2], (*send_buf_h)[3]);
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }
    if (info.rank == info.size - 1) {
        printf("Starting allgather...\n");
    }
    MPI_Barrier(MPI_COMM_WORLD);
}

void allgather_finalize(const info_t info, double *send_buf_h, double *send_buf_d,
                        double *recv_buf_h, double *recv_buf_d, const size_t count)
{
    int nranks = info.size;

    /* Device to host */
    CUDACHECK( hipMemcpy(recv_buf_h, recv_buf_d, sizeof(double) * count * nranks, hipMemcpyDeviceToHost) );

    /* Print and barrier */
    for (int i=0; i<info.size; i++) {
        if (info.rank == i) {
            printf("[%d/%d: %s]: A (\n", info.rank, info.size, info.hostname);
            for (int j=0; j<info.size; j++) {
                printf(" (%.2f, %.2f, %.2f, %.2f, ...),\n",
                       recv_buf_h[0 + j * count], recv_buf_h[1 + j * count],
                       recv_buf_h[2 + j * count], recv_buf_h[3 + j * count]);
            }
            printf(" )\n");
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }
    if (info.rank == info.size - 1) {
        printf("Allgather done.\n");
    }
    MPI_Barrier(MPI_COMM_WORLD);

    /* Free buffers */
    CUDACHECK( hipFree(send_buf_d) );
    CUDACHECK( hipFree(recv_buf_d) );
    free(send_buf_h);
    free(recv_buf_h);
}
4ed78affe2ded96325bd4c727689677ef00a1fcf.cu
#include "common.h"
#include "allgather.h"

void allgather_h2h(double *send_buf_d, double *recv_buf_d, size_t count,
                   ncclDataType_t dtype, ncclComm_t comm, cudaStream_t stream)
{
    int nranks;
    double *send_buf_h = NULL;
    double *recv_buf_h = NULL;

    NCCLCHECK( ncclCommCount(comm, &nranks) );

    send_buf_h = (double*)malloc(sizeof(double) * count);
    recv_buf_h = (double*)malloc(sizeof(double) * count * nranks);

    CUDACHECK( cudaMemcpy(send_buf_h, send_buf_d, sizeof(double) * count, cudaMemcpyDeviceToHost) );

    NCCLCHECK( ncclAllGather((void*)send_buf_h, (void*)recv_buf_h, count, dtype, comm, stream) );

    /* Copy the gathered result from the host staging buffer back to the device. */
    CUDACHECK( cudaMemcpy(recv_buf_d, recv_buf_h, sizeof(double) * count * nranks, cudaMemcpyHostToDevice) );

    free(recv_buf_h);
    free(send_buf_h);
}

void allgather_d2d(double *send_buf_d, double *recv_buf_d, size_t count,
                   ncclDataType_t dtype, ncclComm_t comm, cudaStream_t stream)
{
    NCCLCHECK( ncclAllGather((void*)send_buf_d, (void*)recv_buf_d, count, dtype, comm, stream) );
}

void allgather_init(const info_t info, double **send_buf_h, double **send_buf_d,
                    double **recv_buf_h, double **recv_buf_d, const size_t count)
{
    int nranks = info.size;

    /* Allocation and initialization of buffers */
    *send_buf_h = (double*)malloc(sizeof(double) * count);
    *recv_buf_h = (double*)malloc(sizeof(double) * count * nranks);
    CUDACHECK( cudaMalloc((void**)send_buf_d, sizeof(double) * count) );
    CUDACHECK( cudaMalloc((void**)recv_buf_d, sizeof(double) * count * nranks) );

    for (int i=0; i<count; ++i) {
        (*send_buf_h)[i] = i;
    }
    for (int i=0; i<count * nranks; ++i) {
        (*recv_buf_h)[i] = 0;
    }

    /* Host to device */
    CUDACHECK( cudaMemcpy(*send_buf_d, *send_buf_h, sizeof(double) * count, cudaMemcpyHostToDevice) );
    CUDACHECK( cudaMemcpy(*recv_buf_d, *recv_buf_h, sizeof(double) * count * nranks, cudaMemcpyHostToDevice) );

    /* Print and barrier */
    for (int i=0; i<info.size; i++) {
        if (info.rank == i) {
            printf("[%d/%d: %s]: B (%.2f, %.2f, %.2f, %.2f, ...)\n",
                   info.rank, info.size, info.hostname,
                   (*send_buf_h)[0], (*send_buf_h)[1], (*send_buf_h)[2], (*send_buf_h)[3]);
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }
    if (info.rank == info.size - 1) {
        printf("Starting allgather...\n");
    }
    MPI_Barrier(MPI_COMM_WORLD);
}

void allgather_finalize(const info_t info, double *send_buf_h, double *send_buf_d,
                        double *recv_buf_h, double *recv_buf_d, const size_t count)
{
    int nranks = info.size;

    /* Device to host */
    CUDACHECK( cudaMemcpy(recv_buf_h, recv_buf_d, sizeof(double) * count * nranks, cudaMemcpyDeviceToHost) );

    /* Print and barrier */
    for (int i=0; i<info.size; i++) {
        if (info.rank == i) {
            printf("[%d/%d: %s]: A (\n", info.rank, info.size, info.hostname);
            for (int j=0; j<info.size; j++) {
                printf(" (%.2f, %.2f, %.2f, %.2f, ...),\n",
                       recv_buf_h[0 + j * count], recv_buf_h[1 + j * count],
                       recv_buf_h[2 + j * count], recv_buf_h[3 + j * count]);
            }
            printf(" )\n");
        }
        MPI_Barrier(MPI_COMM_WORLD);
    }
    if (info.rank == info.size - 1) {
        printf("Allgather done.\n");
    }
    MPI_Barrier(MPI_COMM_WORLD);

    /* Free buffers */
    CUDACHECK( cudaFree(send_buf_d) );
    CUDACHECK( cudaFree(recv_buf_d) );
    free(send_buf_h);
    free(recv_buf_h);
}
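Both versions of this file rely on the CUDACHECK and NCCLCHECK macros from common.h, which is not part of this pair. A hypothetical sketch of what such macros typically look like (the exact wording and behavior of common.h's real definitions are assumptions here):

// Hypothetical error-checking macros, assuming common.h follows the usual pattern.
#include <cstdio>
#include <cstdlib>
#include <cuda_runtime.h>
#include <nccl.h>

#define CUDACHECK(cmd) do {                                   \
    cudaError_t e = (cmd);                                    \
    if (e != cudaSuccess) {                                   \
        fprintf(stderr, "CUDA error %s:%d: %s\n",             \
                __FILE__, __LINE__, cudaGetErrorString(e));   \
        exit(EXIT_FAILURE);                                   \
    }                                                         \
} while (0)

#define NCCLCHECK(cmd) do {                                   \
    ncclResult_t r = (cmd);                                   \
    if (r != ncclSuccess) {                                   \
        fprintf(stderr, "NCCL error %s:%d: %s\n",             \
                __FILE__, __LINE__, ncclGetErrorString(r));   \
        exit(EXIT_FAILURE);                                   \
    }                                                         \
} while (0)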
41b028ca113f5c9941d3d9e9608622b3479036fa.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright 2016, National University of Defense Technology // Authors: Xuhao Chen <[email protected]> and Pingfan Li <[email protected]> #define VC_VARIANT "linear_warp" #include <hipcub/hipcub.hpp> #include "vc.h" #include "timer.h" #include "cuda_launch_config.hpp" #include "cutil_subset.h" #include "worklistc.h" __device__ __forceinline__ void assignColor(unsigned int *forbiddenColors, int *colors, int node) { int i; for (i = 0; i < MAXCOLOR/32; i++) { int pos = __ffs(forbiddenColors[i]); if(pos) { colors[node] = i * 32 + pos - 1; break; } } assert(i < MAXCOLOR/32); } __global__ void first_fit(int *row_offsets, int *column_indices, Worklist2 inwl, int *colors) { int id = blockIdx.x * blockDim.x + threadIdx.x; unsigned forbiddenColors[MAXCOLOR/32+1]; int vertex; if (inwl.pop_id(id, vertex)) { int row_begin = row_offsets[vertex]; int row_end = row_offsets[vertex + 1]; for (int j = 0; j < MAXCOLOR/32; j++) forbiddenColors[j] = 0xffffffff; for (int offset = row_begin; offset < row_end; offset ++) { int neighbor = column_indices[offset]; int color = colors[neighbor]; forbiddenColors[color / 32] &= ~(1 << (color % 32)); } assignColor(forbiddenColors, colors, vertex); } } __global__ void conflict_resolve(int nitems, int *row_offsets, int *column_indices, Worklist2 inwl, Worklist2 outwl, int *colors) { /* int id = blockIdx.x * blockDim.x + threadIdx.x; int vertex; int conflicted = 0; if (inwl.pop_id(id, vertex)) { int row_begin = row_offsets[vertex]; int row_end = row_offsets[vertex + 1]; for (int offset = row_begin; offset < row_end; offset ++) { int neighbor = column_indices[offset]; if (colors[vertex] == colors[neighbor] && vertex < neighbor) { conflicted = 1; colors[vertex] = MAXCOLOR; break; } } } if(conflicted) outwl.push(vertex); //*/ ///* __shared__ int ptrs[BLOCK_SIZE/WARP_SIZE][2]; __shared__ bool conflicted[BLOCK_SIZE/WARP_SIZE]; const int thread_id = BLOCK_SIZE * blockIdx.x + threadIdx.x; // global thread index const int thread_lane = threadIdx.x & (WARP_SIZE-1); // thread index within the warp const int warp_id = thread_id / WARP_SIZE; // global warp index const int warp_lane = threadIdx.x / WARP_SIZE; // warp index within the CTA const int num_warps = (BLOCK_SIZE / WARP_SIZE) * gridDim.x; // total number of active warps for(int index = warp_id; index < nitems; index += num_warps) { int src; inwl.pop_id(index, src); if(thread_lane < 2) ptrs[warp_lane][thread_lane] = row_offsets[src + thread_lane]; const int row_start = ptrs[warp_lane][0]; const int row_end = ptrs[warp_lane][1]; if (thread_lane == 0) conflicted[warp_lane] = false; __syncthreads(); bool is_conflicted = false; for(int offset = row_start + thread_lane; offset < row_end; offset += WARP_SIZE) { int dst = column_indices[offset]; if(src < dst && colors[src] == colors[dst]) is_conflicted = true; if(__any(is_conflicted)) { conflicted[warp_lane] = true; break; } } if (thread_lane == 0 && conflicted[warp_lane]) { colors[src] = MAXCOLOR; outwl.push(src); } } //*/ } int VCSolver(int m, int nnz, int *row_offsets, int *column_indices, int *colors) { int num_colors = 0, iter = 0; int *d_row_offsets, *d_column_indices, *d_colors; CUDA_SAFE_CALL(hipMalloc((void **)&d_row_offsets, (m + 1) * sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void **)&d_column_indices, nnz * sizeof(int))); CUDA_SAFE_CALL(hipMalloc((void **)&d_colors, m * sizeof(int))); CUDA_SAFE_CALL(hipMemcpy(d_row_offsets, row_offsets, (m + 1) * sizeof(int), hipMemcpyHostToDevice)); 
CUDA_SAFE_CALL(hipMemcpy(d_column_indices, column_indices, nnz * sizeof(int), hipMemcpyHostToDevice)); CUDA_SAFE_CALL(hipMemcpy(d_colors, colors, m * sizeof(int), hipMemcpyHostToDevice)); int nitems = m; Worklist2 inwl(m), outwl(m); Worklist2 *inwlptr = &inwl, *outwlptr = &outwl; for(int i = 0; i < m; i ++) inwl.h_queue[i] = i; inwl.set_index(m); CUDA_SAFE_CALL(hipMemcpy(inwl.d_queue, inwl.h_queue, m * sizeof(int), hipMemcpyHostToDevice)); //thrust::sequence(thrust::device, inwl.d_queue, inwl.d_queue + m); const int nthreads = BLOCK_SIZE; hipDeviceProp_t deviceProp; CUDA_SAFE_CALL(hipGetDeviceProperties(&deviceProp, 0)); const int nSM = deviceProp.multiProcessorCount; const int max_blocks_per_SM = maximum_residency(conflict_resolve, nthreads, 0); const int max_blocks = max_blocks_per_SM * nSM; CUDA_SAFE_CALL(hipDeviceSynchronize()); printf("Launching CUDA VC solver (%d threads/CTA) ...\n", BLOCK_SIZE); Timer t; t.Start(); while (nitems > 0) { iter ++; const int mblocks = (nitems - 1) / nthreads + 1; hipLaunchKernelGGL(( first_fit), dim3(mblocks), dim3(nthreads), 0, 0, d_row_offsets, d_column_indices, *inwlptr, d_colors); const int nblocks = ::min(max_blocks, DIVIDE_INTO(nitems, WARPS_PER_BLOCK)); hipLaunchKernelGGL(( conflict_resolve), dim3(nblocks), dim3(nthreads), 0, 0, nitems, d_row_offsets, d_column_indices, *inwlptr, *outwlptr, d_colors); nitems = outwlptr->nitems(); Worklist2 * tmp = inwlptr; inwlptr = outwlptr; outwlptr = tmp; outwlptr->reset(); } CUDA_SAFE_CALL(hipDeviceSynchronize()); t.Stop(); CUDA_SAFE_CALL(hipMemcpy(colors, d_colors, m * sizeof(int), hipMemcpyDeviceToHost)); #pragma omp parallel for reduction(max : num_colors) for (int n = 0; n < m; n ++) num_colors = max(num_colors, colors[n]); num_colors ++; printf("\titerations = %d.\n", iter); printf("\truntime[%s] = %f ms, num_colors = %d.\n", VC_VARIANT, t.Millisecs(), num_colors); CUDA_SAFE_CALL(hipFree(d_row_offsets)); CUDA_SAFE_CALL(hipFree(d_column_indices)); CUDA_SAFE_CALL(hipFree(d_colors)); return num_colors; }
41b028ca113f5c9941d3d9e9608622b3479036fa.cu
// Copyright 2016, National University of Defense Technology // Authors: Xuhao Chen <[email protected]> and Pingfan Li <[email protected]> #define VC_VARIANT "linear_warp" #include <cub/cub.cuh> #include "vc.h" #include "timer.h" #include "cuda_launch_config.hpp" #include "cutil_subset.h" #include "worklistc.h" __device__ __forceinline__ void assignColor(unsigned int *forbiddenColors, int *colors, int node) { int i; for (i = 0; i < MAXCOLOR/32; i++) { int pos = __ffs(forbiddenColors[i]); if(pos) { colors[node] = i * 32 + pos - 1; break; } } assert(i < MAXCOLOR/32); } __global__ void first_fit(int *row_offsets, int *column_indices, Worklist2 inwl, int *colors) { int id = blockIdx.x * blockDim.x + threadIdx.x; unsigned forbiddenColors[MAXCOLOR/32+1]; int vertex; if (inwl.pop_id(id, vertex)) { int row_begin = row_offsets[vertex]; int row_end = row_offsets[vertex + 1]; for (int j = 0; j < MAXCOLOR/32; j++) forbiddenColors[j] = 0xffffffff; for (int offset = row_begin; offset < row_end; offset ++) { int neighbor = column_indices[offset]; int color = colors[neighbor]; forbiddenColors[color / 32] &= ~(1 << (color % 32)); } assignColor(forbiddenColors, colors, vertex); } } __global__ void conflict_resolve(int nitems, int *row_offsets, int *column_indices, Worklist2 inwl, Worklist2 outwl, int *colors) { /* int id = blockIdx.x * blockDim.x + threadIdx.x; int vertex; int conflicted = 0; if (inwl.pop_id(id, vertex)) { int row_begin = row_offsets[vertex]; int row_end = row_offsets[vertex + 1]; for (int offset = row_begin; offset < row_end; offset ++) { int neighbor = column_indices[offset]; if (colors[vertex] == colors[neighbor] && vertex < neighbor) { conflicted = 1; colors[vertex] = MAXCOLOR; break; } } } if(conflicted) outwl.push(vertex); //*/ ///* __shared__ int ptrs[BLOCK_SIZE/WARP_SIZE][2]; __shared__ bool conflicted[BLOCK_SIZE/WARP_SIZE]; const int thread_id = BLOCK_SIZE * blockIdx.x + threadIdx.x; // global thread index const int thread_lane = threadIdx.x & (WARP_SIZE-1); // thread index within the warp const int warp_id = thread_id / WARP_SIZE; // global warp index const int warp_lane = threadIdx.x / WARP_SIZE; // warp index within the CTA const int num_warps = (BLOCK_SIZE / WARP_SIZE) * gridDim.x; // total number of active warps for(int index = warp_id; index < nitems; index += num_warps) { int src; inwl.pop_id(index, src); if(thread_lane < 2) ptrs[warp_lane][thread_lane] = row_offsets[src + thread_lane]; const int row_start = ptrs[warp_lane][0]; const int row_end = ptrs[warp_lane][1]; if (thread_lane == 0) conflicted[warp_lane] = false; __syncthreads(); bool is_conflicted = false; for(int offset = row_start + thread_lane; offset < row_end; offset += WARP_SIZE) { int dst = column_indices[offset]; if(src < dst && colors[src] == colors[dst]) is_conflicted = true; if(__any(is_conflicted)) { conflicted[warp_lane] = true; break; } } if (thread_lane == 0 && conflicted[warp_lane]) { colors[src] = MAXCOLOR; outwl.push(src); } } //*/ } int VCSolver(int m, int nnz, int *row_offsets, int *column_indices, int *colors) { int num_colors = 0, iter = 0; int *d_row_offsets, *d_column_indices, *d_colors; CUDA_SAFE_CALL(cudaMalloc((void **)&d_row_offsets, (m + 1) * sizeof(int))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_column_indices, nnz * sizeof(int))); CUDA_SAFE_CALL(cudaMalloc((void **)&d_colors, m * sizeof(int))); CUDA_SAFE_CALL(cudaMemcpy(d_row_offsets, row_offsets, (m + 1) * sizeof(int), cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_column_indices, column_indices, nnz * sizeof(int), 
cudaMemcpyHostToDevice)); CUDA_SAFE_CALL(cudaMemcpy(d_colors, colors, m * sizeof(int), cudaMemcpyHostToDevice)); int nitems = m; Worklist2 inwl(m), outwl(m); Worklist2 *inwlptr = &inwl, *outwlptr = &outwl; for(int i = 0; i < m; i ++) inwl.h_queue[i] = i; inwl.set_index(m); CUDA_SAFE_CALL(cudaMemcpy(inwl.d_queue, inwl.h_queue, m * sizeof(int), cudaMemcpyHostToDevice)); //thrust::sequence(thrust::device, inwl.d_queue, inwl.d_queue + m); const int nthreads = BLOCK_SIZE; cudaDeviceProp deviceProp; CUDA_SAFE_CALL(cudaGetDeviceProperties(&deviceProp, 0)); const int nSM = deviceProp.multiProcessorCount; const int max_blocks_per_SM = maximum_residency(conflict_resolve, nthreads, 0); const int max_blocks = max_blocks_per_SM * nSM; CUDA_SAFE_CALL(cudaDeviceSynchronize()); printf("Launching CUDA VC solver (%d threads/CTA) ...\n", BLOCK_SIZE); Timer t; t.Start(); while (nitems > 0) { iter ++; const int mblocks = (nitems - 1) / nthreads + 1; first_fit<<<mblocks, nthreads>>>(d_row_offsets, d_column_indices, *inwlptr, d_colors); const int nblocks = std::min(max_blocks, DIVIDE_INTO(nitems, WARPS_PER_BLOCK)); conflict_resolve<<<nblocks, nthreads>>>(nitems, d_row_offsets, d_column_indices, *inwlptr, *outwlptr, d_colors); nitems = outwlptr->nitems(); Worklist2 * tmp = inwlptr; inwlptr = outwlptr; outwlptr = tmp; outwlptr->reset(); } CUDA_SAFE_CALL(cudaDeviceSynchronize()); t.Stop(); CUDA_SAFE_CALL(cudaMemcpy(colors, d_colors, m * sizeof(int), cudaMemcpyDeviceToHost)); #pragma omp parallel for reduction(max : num_colors) for (int n = 0; n < m; n ++) num_colors = max(num_colors, colors[n]); num_colors ++; printf("\titerations = %d.\n", iter); printf("\truntime[%s] = %f ms, num_colors = %d.\n", VC_VARIANT, t.Millisecs(), num_colors); CUDA_SAFE_CALL(cudaFree(d_row_offsets)); CUDA_SAFE_CALL(cudaFree(d_column_indices)); CUDA_SAFE_CALL(cudaFree(d_colors)); return num_colors; }
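conflict_resolve above uses the legacy __any() warp vote, which CUDA 9 deprecates in favor of __any_sync(mask, predicate). A small self-contained demo of the sync variant (not part of the solver; the full 0xffffffff mask assumes all 32 lanes of the warp are active):

#include <cstdio>
#include <cuda_runtime.h>

__global__ void any_sync_demo(const int *flags, int *warp_result) {
  int lane = threadIdx.x & 31;
  bool pred = flags[threadIdx.x] != 0;
  // Every lane of the warp participates, so the full mask is supplied.
  int any = __any_sync(0xffffffffu, pred);
  if (lane == 0) warp_result[threadIdx.x >> 5] = any;
}

int main() {
  int h_flags[32] = {0};
  h_flags[7] = 1;                       // one lane sets the predicate
  int *d_flags, *d_res, h_res = 0;
  cudaMalloc(&d_flags, sizeof(h_flags));
  cudaMalloc(&d_res, sizeof(int));
  cudaMemcpy(d_flags, h_flags, sizeof(h_flags), cudaMemcpyHostToDevice);
  any_sync_demo<<<1, 32>>>(d_flags, d_res);
  cudaMemcpy(&h_res, d_res, sizeof(int), cudaMemcpyDeviceToHost);
  printf("__any_sync over the warp: %d\n", h_res);   // expected: 1
  cudaFree(d_flags); cudaFree(d_res);
  return 0;
}

The explicit mask matters on Volta and later, where independent thread scheduling means the compiler can no longer assume implicit warp-synchronous execution.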
7824842bb88f95c689e7da640e35ea5b42675f6a.hip
// !!! This is a file automatically generated by hipify!!! #include <cstdio> #include <cstdint> #include <cstdlib> #include <algorithm> #include "../utils/SyncedMemory.h" #include "../utils/Timer.h" #include "pgm.h" #include "lab3.h" #include "fstream" using namespace std; #define CHECK {\ auto e = hipDeviceSynchronize();\ if (e != hipSuccess) {\ printf("At " __FILE__ ":%d, %s\n", __LINE__, hipGetErrorString(e));\ abort();\ }\ } int main(int argc, char **argv) { if (argc != 13) { printf("Usage: %s <background> <target> <mask> <offset x> <offset y> <output> <linear(0 for false, 1 for true)> <base> <power_base> <log_x_base> <start_scale(0 for 1/8, 3 for original)> <scale_terminal(1 for 1/8, 4 for original)>\n", argv[0]); abort(); } bool sucb, suct, sucm; int wb, hb, cb, wt, ht, ct, wm, hm, cm; int wtest, htest, ctest; bool suctest; auto imgb = ReadNetpbm(wb, hb, cb, sucb, argv[1]); auto imgt = ReadNetpbm(wt, ht, ct, suct, argv[2]); auto imgm = ReadNetpbm(wm, hm, cm, sucm, argv[3]); auto imgTest = ReadNetpbm(wtest, htest, ctest, suctest, "lab3_test/output.ppm"); if (not (sucb and suct and sucm)) { puts("Something wrong with reading the input image files."); abort(); } if (wt != wm or ht != hm) { puts("The mask and target image must have the same size."); abort(); } if (cm != 1) { puts("The mask image must be mono-colored."); abort(); } if (cb != 3 or ct != 3) { puts("The background and target image must be colored."); abort(); } fstream file; file.open("test.csv", ios::out|ios::app); if(!file) { cerr<<"test file generator error!\n"; exit(1); } const int oy = atoi(argv[4]), ox = atoi(argv[5]); const int linear = atoi(argv[7]), base = atoi(argv[8]), start = atoi(argv[11]), testL = atoi(argv[12]); const float dre = atof(argv[9]), bScale = atof(argv[10]); const int SIZEB = wb*hb*3; const int SIZET = wt*ht*3; const int SIZEM = wm*hm; MemoryBuffer<float> background(SIZEB), target(SIZET), mask(SIZEM), output(SIZEB); auto background_s = background.CreateSync(SIZEB); auto target_s = target.CreateSync(SIZET); auto mask_s = mask.CreateSync(SIZEM); auto output_s = output.CreateSync(SIZEB); float *background_cpu = background_s.get_cpu_wo(); float *target_cpu = target_s.get_cpu_wo(); float *mask_cpu = mask_s.get_cpu_wo(); copy(imgb.get(), imgb.get()+SIZEB, background_cpu); copy(imgt.get(), imgt.get()+SIZET, target_cpu); copy(imgm.get(), imgm.get()+SIZEM, mask_cpu); Timer timer; if(linear == 0) { timer.Start(); PoissonImageCloning( background_s.get_gpu_ro(), target_s.get_gpu_ro(), mask_s.get_gpu_ro(), output_s.get_gpu_wo(), wb, hb, wt, ht, oy, ox, base, false, dre, bScale, start, testL ); } else { timer.Start(); PoissonImageCloning( background_s.get_gpu_ro(), target_s.get_gpu_ro(), mask_s.get_gpu_ro(), output_s.get_gpu_wo(), wb, hb, wt, ht, oy, ox, base, true, dre, bScale, start, testL ); } timer.Pause(); unique_ptr<uint8_t[]> o(new uint8_t[SIZEB]); const float *o_cpu = output_s.get_cpu_ro(); transform(o_cpu, o_cpu+SIZEB, o.get(), [](float f) -> uint8_t { return max(min(int(f+0.5f), 255), 0); }); float diff = 0.0f; for(int i=0;i<wt;i++) { for(int j=0;j<ht;j++) { if(imgm[i*wt+j] > 127.0f && ox+i < wb && oy+j < hb) { int bp = ((oy+j)*wb+ox+i)*3; diff += (imgTest[bp] + imgTest[bp+1] + imgTest[bp+2] - o.get()[bp] - o.get()[bp+1] - o.get()[bp+2])/3; } } } file<<argv[7]<<','<<argv[8]<<','<<argv[9]<<','<<argv[10]<<','<<argv[11]<<','<<argv[12]<<','<<diff/wt/ht<<','<<timer.get_count()<<'\n'; file.close(); WritePPM(o.get(), wb, hb, argv[6]); return 0; }
7824842bb88f95c689e7da640e35ea5b42675f6a.cu
#include <cstdio> #include <cstdint> #include <cstdlib> #include <algorithm> #include "../utils/SyncedMemory.h" #include "../utils/Timer.h" #include "pgm.h" #include "lab3.h" #include "fstream" using namespace std; #define CHECK {\ auto e = cudaDeviceSynchronize();\ if (e != cudaSuccess) {\ printf("At " __FILE__ ":%d, %s\n", __LINE__, cudaGetErrorString(e));\ abort();\ }\ } int main(int argc, char **argv) { if (argc != 13) { printf("Usage: %s <background> <target> <mask> <offset x> <offset y> <output> <linear(0 for false, 1 for true)> <base> <power_base> <log_x_base> <start_scale(0 for 1/8, 3 for original)> <scale_terminal(1 for 1/8, 4 for original)>\n", argv[0]); abort(); } bool sucb, suct, sucm; int wb, hb, cb, wt, ht, ct, wm, hm, cm; int wtest, htest, ctest; bool suctest; auto imgb = ReadNetpbm(wb, hb, cb, sucb, argv[1]); auto imgt = ReadNetpbm(wt, ht, ct, suct, argv[2]); auto imgm = ReadNetpbm(wm, hm, cm, sucm, argv[3]); auto imgTest = ReadNetpbm(wtest, htest, ctest, suctest, "lab3_test/output.ppm"); if (not (sucb and suct and sucm)) { puts("Something wrong with reading the input image files."); abort(); } if (wt != wm or ht != hm) { puts("The mask and target image must have the same size."); abort(); } if (cm != 1) { puts("The mask image must be mono-colored."); abort(); } if (cb != 3 or ct != 3) { puts("The background and target image must be colored."); abort(); } fstream file; file.open("test.csv", ios::out|ios::app); if(!file) { cerr<<"test file generator error!\n"; exit(1); } const int oy = atoi(argv[4]), ox = atoi(argv[5]); const int linear = atoi(argv[7]), base = atoi(argv[8]), start = atoi(argv[11]), testL = atoi(argv[12]); const float dre = atof(argv[9]), bScale = atof(argv[10]); const int SIZEB = wb*hb*3; const int SIZET = wt*ht*3; const int SIZEM = wm*hm; MemoryBuffer<float> background(SIZEB), target(SIZET), mask(SIZEM), output(SIZEB); auto background_s = background.CreateSync(SIZEB); auto target_s = target.CreateSync(SIZET); auto mask_s = mask.CreateSync(SIZEM); auto output_s = output.CreateSync(SIZEB); float *background_cpu = background_s.get_cpu_wo(); float *target_cpu = target_s.get_cpu_wo(); float *mask_cpu = mask_s.get_cpu_wo(); copy(imgb.get(), imgb.get()+SIZEB, background_cpu); copy(imgt.get(), imgt.get()+SIZET, target_cpu); copy(imgm.get(), imgm.get()+SIZEM, mask_cpu); Timer timer; if(linear == 0) { timer.Start(); PoissonImageCloning( background_s.get_gpu_ro(), target_s.get_gpu_ro(), mask_s.get_gpu_ro(), output_s.get_gpu_wo(), wb, hb, wt, ht, oy, ox, base, false, dre, bScale, start, testL ); } else { timer.Start(); PoissonImageCloning( background_s.get_gpu_ro(), target_s.get_gpu_ro(), mask_s.get_gpu_ro(), output_s.get_gpu_wo(), wb, hb, wt, ht, oy, ox, base, true, dre, bScale, start, testL ); } timer.Pause(); unique_ptr<uint8_t[]> o(new uint8_t[SIZEB]); const float *o_cpu = output_s.get_cpu_ro(); transform(o_cpu, o_cpu+SIZEB, o.get(), [](float f) -> uint8_t { return max(min(int(f+0.5f), 255), 0); }); float diff = 0.0f; for(int i=0;i<wt;i++) { for(int j=0;j<ht;j++) { if(imgm[i*wt+j] > 127.0f && ox+i < wb && oy+j < hb) { int bp = ((oy+j)*wb+ox+i)*3; diff += (imgTest[bp] + imgTest[bp+1] + imgTest[bp+2] - o.get()[bp] - o.get()[bp+1] - o.get()[bp+2])/3; } } } file<<argv[7]<<','<<argv[8]<<','<<argv[9]<<','<<argv[10]<<','<<argv[11]<<','<<argv[12]<<','<<diff/wt/ht<<','<<timer.get_count()<<'\n'; file.close(); WritePPM(o.get(), wb, hb, argv[6]); return 0; }
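PoissonImageCloning itself is declared in lab3.h and implemented elsewhere, so only the driver is shown above. As a rough, hypothetical sketch of the kind of Jacobi sweep such an implementation usually performs inside the mask (all names and the buffer layout below are illustrative assumptions, not the assignment's actual API; cur/next are assumed to live on the background grid, pre-filled with the background outside the mask, and the kernel is launched repeatedly with the two buffers ping-ponged):

// One Jacobi sweep for Poisson editing: solve Laplacian(f) = Laplacian(g) inside
// the mask, with the fixed background supplying the Dirichlet boundary values.
__global__ void jacobi_sweep(const float *target, const float *mask,
                             const float *cur, float *next,
                             int wb, int hb, int wt, int ht, int oy, int ox) {
  const int xt = blockIdx.x * blockDim.x + threadIdx.x;   // coordinates in the target patch
  const int yt = blockIdx.y * blockDim.y + threadIdx.y;
  if (xt <= 0 || yt <= 0 || xt >= wt - 1 || yt >= ht - 1) return;
  if (mask[yt * wt + xt] <= 127.0f) return;               // only solve inside the cloned region
  const int xb = ox + xt, yb = oy + yt;                   // same pixel on the background grid
  if (xb <= 0 || yb <= 0 || xb >= wb - 1 || yb >= hb - 1) return;
  for (int c = 0; c < 3; ++c) {
    // Guidance term: discrete Laplacian of the target patch.
    float b = 4.0f * target[(yt * wt + xt) * 3 + c]
            - target[((yt - 1) * wt + xt) * 3 + c] - target[((yt + 1) * wt + xt) * 3 + c]
            - target[(yt * wt + xt - 1) * 3 + c]   - target[(yt * wt + xt + 1) * 3 + c];
    // Neighbours come from the current estimate; outside the mask cur holds the
    // background, which acts as the boundary condition.
    float sum = cur[((yb - 1) * wb + xb) * 3 + c] + cur[((yb + 1) * wb + xb) * 3 + c]
              + cur[(yb * wb + xb - 1) * 3 + c]   + cur[(yb * wb + xb + 1) * 3 + c];
    next[(yb * wb + xb) * 3 + c] = (sum + b) * 0.25f;
  }
}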
dd4d7802e61564c8ad940a94be7a61157e7888f7.hip
// !!! This is a file automatically generated by hipify!!! #include "spline.h" #include "solve_system.h" #include <hip/hip_runtime.h> #include <stdio.h> #include <math.h> #include <stdlib.h> #include <assert.h> #define cuAlloc(x, y) hipMalloc((void**)&(x), (y)) #define copyToCard(x, y, z) hipMemcpy((x), (y), (z), hipMemcpyHostToDevice) #define copyToHost(x, y, z) hipMemcpy((x), (y), (z), hipMemcpyDeviceToHost) static Point **m_generatePoints(Point **, int, int, int); __global__ void fillA(float *, Point *); __global__ void fillb(float *, Point *); __global__ void solveSplineCu(CubicCurve *, Point*); __global__ void fillCubicCurves(CubicCurve*, Point*, float*); /** * Checks that numSets, numPoints, and granularity are all above zero. * If granularity is 1, we just return the start points unchanged. * Otherwise, we generate points. */ Point** generatePoints(Point ** start, int numSets, int numPoints, int gran) { assert(numSets > 0); assert(numPoints > 0); assert(gran > 0); if (gran == 1) { return start; } else { return m_generatePoints(start, numSets, numPoints, gran); } } static Point** m_generatePoints(Point **start, int numSets, int numPoints, int gran) { static int sets = 1, granularity = 1, points = 2; static Point** ret = (Point **) malloc(sets * sizeof(Point *)); static Point* ans = (Point*) malloc((points - 1) * sets * granularity * sizeof(Point)); CubicCurve *ss_d; int set, totFinalPoints = (numPoints - 1) * numSets * gran; Point *ans_d; if (numSets != sets) { sets = numSets; free(ret); ret = (Point **) malloc(numSets * sizeof(Point *)); } if (numSets != sets || gran != granularity || numPoints != points) { sets = numSets; granularity = gran; points = numPoints; free(ans); ans = (Point*) malloc(totFinalPoints * sizeof(Point)); } ss_d = generateSplines(start, numSets, numPoints); hipDeviceSynchronize(); cuAlloc(ans_d, totFinalPoints * sizeof(Point)); for (set = 0; set < numSets; set++) { hipLaunchKernelGGL(( solveSplineCu), dim3((numPoints - 1)), dim3(gran), 0, 0, &(ss_d[set * (numPoints - 1)]), &(ans_d[set * (numPoints - 1) * gran])); } copyToHost(ans, ans_d, totFinalPoints * sizeof(Point)); for (set = 0; set < numSets; set++) { ret[set] = &(ans[set * (numPoints - 1) * gran]); } hipFree(ans_d); hipFree(ss_d); return ret; } /** * Generates a list of splines from the list of points pts. The number of * elements in pts should equal num. The number of splines returned is then * num - 1. */ CubicCurve* generateSplines(Point **pts, int splines, int num) { CubicCurve *ss_d; int rows = num - 2, elems = (rows * rows), numCurves = num - 1; float *A_d, *b_d; float *A = (float *) calloc(elems, sizeof(float)); hipError_t error; Point *pts_d; // Malloc on the card: Ax = b hipMalloc((void**)&A_d, elems * sizeof(float)); checkCudaError("spline: error mallocing on device:"); hipMalloc((void**)&b_d, splines * rows * sizeof(float)); checkCudaError("spline: error mallocing on device:"); hipMalloc((void**)&pts_d, splines * num * sizeof(Point)); checkCudaError("spline: error mallocing on device:"); hipMalloc((void**)&ss_d, splines * numCurves * sizeof(CubicCurve)); checkCudaError("spline: error mallocing on device:"); // Copies A to A_d: ensures the CUDA buffer is initially set to 0's hipMemcpy(A_d, A, elems * sizeof(float), hipMemcpyHostToDevice); free(A); // Copy point data to card. 
hipMemcpy(pts_d, pts[0], num * splines * sizeof(Point), hipMemcpyHostToDevice); checkCudaError("spline.cu: error memcpying to device:"); hipLaunchKernelGGL(( fillA), dim3(1), dim3(rows), 0, 0, A_d, pts_d); checkCudaError("spline.cu: error filling A matrix:"); // Perform computation to fill A_d and b_d. // A: coefficients of z-values in equations. Each row of matrix is one eqn. // b: right-hand-side of the equations. // (Ax = b), where A is a matrix, and x and b are column vectors. for (int i = 0; i <= splines / MAX_GRID_SIZE; i++) { int nBlks = i * MAX_GRID_SIZE; int blocks = (i == splines / MAX_GRID_SIZE) ? splines % MAX_GRID_SIZE : MAX_GRID_SIZE; hipLaunchKernelGGL(( fillb), dim3(blocks), dim3(rows), 0, 0, &b_d[nBlks * rows], &pts_d[nBlks * rows]); checkCudaError("spline.cu: error filling b vector:"); } // Make sure fillMatrices is done. hipDeviceSynchronize(); float *x_d = solveSystem(A_d, b_d, splines, rows); hipFree(A_d); hipFree(b_d); for (int i = 0; i <= splines / MAX_GRID_SIZE; i++) { int nBlks = i * MAX_GRID_SIZE; int blocks = (i == splines / MAX_GRID_SIZE) ? splines % MAX_GRID_SIZE : MAX_GRID_SIZE; hipLaunchKernelGGL(( fillCubicCurves), dim3(blocks), dim3(num - 1), 0, 0, &ss_d[nBlks * (num - 1)], &pts_d[nBlks * num], &x_d[nBlks * (num - 2)]); checkCudaError("spline.cu: error filling cubic curves:"); } hipFree(x_d); hipFree(pts_d); return ss_d; } __shared__ float h[TILE_SIZE]; __global__ void fillCubicCurves(CubicCurve* ccs, Point* pts, float* x) { // threadDim = numPoints int numPoints = blockDim.x; int numCurves = numPoints; // j = spNum = spline number int spNum = blockIdx.x; // i = ptNum = point number int ptNum = threadIdx.x; int rows = numPoints - 1; // Now that we have the matrix, solve for x. X then holds our Z-values // (the second derivative at the points). CubicCurve *cc = ccs + (spNum * numCurves); cc[0].z1 = 0; cc[spNum - 2].z2 = 0; // Fill CubicCurve array with Z-values and points if (ptNum != 0) cc[ptNum].z1 = x[spNum * rows + (ptNum - 1)]; cc[ptNum].p1 = pts[spNum * (numPoints + 1) + ptNum]; cc[ptNum].p2 = pts[spNum * (numPoints + 1) + (ptNum + 1)]; if (ptNum != rows) cc[ptNum].z2 = x[spNum * rows + ptNum]; } /** * Fills matrix A with coefficients of equations (where Z-values are vars) * and fills vector b with the right-hand-sides of the linear equation. * The length of pts should be 2 more than blockDim.x. * GridDim.x should be the number of matrices to fill. */ __global__ void fillA(float *A, Point *pts) { int j = 0; int pos = threadIdx.x; int rows = blockDim.x; // Initialize h values: h-sub-i is the distance between point i+1 and point // i on the x-axis. if (pos == rows - 1) h[pos + 1] = pts[pos + 2].x - pts[pos + 1].x; h[pos] = pts[pos + 1].x - pts[pos].x; __syncthreads(); j = (pos % rows == 0) ? 0 : pos - 1; // If we're not on row 0, fill the element left of the diagonal with h[pos] if (pos != 0) A[pos * rows + j++] = h[pos]; // For every row, fill the diagonal element A[pos * rows + j++] = 2*(h[pos + 1] + h[pos]); // If we're not on the last row, fill the element right of the diag. if (pos != rows - 1) A[pos * rows + j++] = h[pos + 1]; } __global__ void fillb(float *b, Point *pts) { int pos = blockIdx.x * blockDim.x + threadIdx.x, x = threadIdx.x; int ptpos = blockIdx.x * (blockDim.x + 2) + threadIdx.x; int rows = blockDim.x; // Initialize h values: h-sub-i is the distance between point i+1 and point // i on the x-axis. 
if (x == rows - 1) h[x + 1] = pts[ptpos + 2].x - pts[ptpos + 1].x; h[x] = pts[ptpos + 1].x - pts[ptpos].x; __syncthreads(); // Fill b with this formula. if (x < rows) b[pos] = 6*((pts[ptpos + 2].y - pts[ptpos + 1].y) / h[x + 1] - (pts[ptpos + 1].y - pts[ptpos].y) / h[x]); } /** * Takes the cubic spline described by cc and solves it. * blockDim.x indicates the granularity of the answer per curve. * gridDim.x indicates the number of curves to solve. * * Makes blockDim.x - 1 points between each two sets of curves. */ __global__ void solveSplineCu(CubicCurve *cc, Point *ans) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int cc_num = blockIdx.x; int x_num = threadIdx.x; CubicCurve cv = cc[cc_num]; float h = cv.p2.x - cv.p1.x; float x = cv.p1.x + (h / blockDim.x) * ((float) x_num); float fx1 = x - cv.p1.x; float fx2 = cv.p2.x - x; ans[idx].x = x; ans[idx].y = (cv.z2 / (6.0*h)) * (fx1 * fx1 * fx1) + (cv.z1 / (6.0*h)) * (fx2 * fx2 * fx2) + (cv.p2.y / h - (h / 6.0) * cv.z2) * fx1 + (cv.p1.y / h - (h / 6.0) * cv.z1) * fx2; }
dd4d7802e61564c8ad940a94be7a61157e7888f7.cu
#include "spline.h" #include "solve_system.h" #include <cuda.h> #include <stdio.h> #include <math.h> #include <stdlib.h> #include <assert.h> #define cuAlloc(x, y) cudaMalloc((void**)&(x), (y)) #define copyToCard(x, y, z) cudaMemcpy((x), (y), (z), cudaMemcpyHostToDevice) #define copyToHost(x, y, z) cudaMemcpy((x), (y), (z), cudaMemcpyDeviceToHost) static Point **m_generatePoints(Point **, int, int, int); __global__ void fillA(float *, Point *); __global__ void fillb(float *, Point *); __global__ void solveSplineCu(CubicCurve *, Point*); __global__ void fillCubicCurves(CubicCurve*, Point*, float*); /** * Checks that numSets, numPoints, and granularity are all above zero. * If granularity is 1, we just return the start points unchanged. * Otherwise, we generate points. */ Point** generatePoints(Point ** start, int numSets, int numPoints, int gran) { assert(numSets > 0); assert(numPoints > 0); assert(gran > 0); if (gran == 1) { return start; } else { return m_generatePoints(start, numSets, numPoints, gran); } } static Point** m_generatePoints(Point **start, int numSets, int numPoints, int gran) { static int sets = 1, granularity = 1, points = 2; static Point** ret = (Point **) malloc(sets * sizeof(Point *)); static Point* ans = (Point*) malloc((points - 1) * sets * granularity * sizeof(Point)); CubicCurve *ss_d; int set, totFinalPoints = (numPoints - 1) * numSets * gran; Point *ans_d; if (numSets != sets) { sets = numSets; free(ret); ret = (Point **) malloc(numSets * sizeof(Point *)); } if (numSets != sets || gran != granularity || numPoints != points) { sets = numSets; granularity = gran; points = numPoints; free(ans); ans = (Point*) malloc(totFinalPoints * sizeof(Point)); } ss_d = generateSplines(start, numSets, numPoints); cudaDeviceSynchronize(); cuAlloc(ans_d, totFinalPoints * sizeof(Point)); for (set = 0; set < numSets; set++) { solveSplineCu<<<(numPoints - 1), gran>>> (&(ss_d[set * (numPoints - 1)]), &(ans_d[set * (numPoints - 1) * gran])); } copyToHost(ans, ans_d, totFinalPoints * sizeof(Point)); for (set = 0; set < numSets; set++) { ret[set] = &(ans[set * (numPoints - 1) * gran]); } cudaFree(ans_d); cudaFree(ss_d); return ret; } /** * Generates a list of splines from the list of points pts. The number of * elements in pts should equal num. The number of splines returned is then * num - 1. */ CubicCurve* generateSplines(Point **pts, int splines, int num) { CubicCurve *ss_d; int rows = num - 2, elems = (rows * rows), numCurves = num - 1; float *A_d, *b_d; float *A = (float *) calloc(elems, sizeof(float)); cudaError_t error; Point *pts_d; // Malloc on the card: Ax = b cudaMalloc((void**)&A_d, elems * sizeof(float)); checkCudaError("spline: error mallocing on device:"); cudaMalloc((void**)&b_d, splines * rows * sizeof(float)); checkCudaError("spline: error mallocing on device:"); cudaMalloc((void**)&pts_d, splines * num * sizeof(Point)); checkCudaError("spline: error mallocing on device:"); cudaMalloc((void**)&ss_d, splines * numCurves * sizeof(CubicCurve)); checkCudaError("spline: error mallocing on device:"); // Copies A to A_d: ensures the CUDA buffer is initially set to 0's cudaMemcpy(A_d, A, elems * sizeof(float), cudaMemcpyHostToDevice); free(A); // Copy point data to card. cudaMemcpy(pts_d, pts[0], num * splines * sizeof(Point), cudaMemcpyHostToDevice); checkCudaError("spline.cu: error memcpying to device:"); fillA<<<1, rows>>>(A_d, pts_d); checkCudaError("spline.cu: error filling A matrix:"); // Perform computation to fill A_d and b_d. // A: coefficients of z-values in equations. 
Each row of matrix is one eqn. // b: right-hand-side of the equations. // (Ax = b), where A is a matrix, and x and b are column vectors. for (int i = 0; i <= splines / MAX_GRID_SIZE; i++) { int nBlks = i * MAX_GRID_SIZE; int blocks = (i == splines / MAX_GRID_SIZE) ? splines % MAX_GRID_SIZE : MAX_GRID_SIZE; fillb<<<blocks, rows>>>(&b_d[nBlks * rows], &pts_d[nBlks * rows]); checkCudaError("spline.cu: error filling b vector:"); } // Make sure fillMatrices is done. cudaDeviceSynchronize(); float *x_d = solveSystem(A_d, b_d, splines, rows); cudaFree(A_d); cudaFree(b_d); for (int i = 0; i <= splines / MAX_GRID_SIZE; i++) { int nBlks = i * MAX_GRID_SIZE; int blocks = (i == splines / MAX_GRID_SIZE) ? splines % MAX_GRID_SIZE : MAX_GRID_SIZE; fillCubicCurves<<<blocks, num - 1>>>(&ss_d[nBlks * (num - 1)], &pts_d[nBlks * num], &x_d[nBlks * (num - 2)]); checkCudaError("spline.cu: error filling cubic curves:"); } cudaFree(x_d); cudaFree(pts_d); return ss_d; } __shared__ float h[TILE_SIZE]; __global__ void fillCubicCurves(CubicCurve* ccs, Point* pts, float* x) { // threadDim = numPoints int numPoints = blockDim.x; int numCurves = numPoints; // j = spNum = spline number int spNum = blockIdx.x; // i = ptNum = point number int ptNum = threadIdx.x; int rows = numPoints - 1; // Now that we have the matrix, solve for x. X then holds our Z-values // (the second derivative at the points). CubicCurve *cc = ccs + (spNum * numCurves); cc[0].z1 = 0; cc[spNum - 2].z2 = 0; // Fill CubicCurve array with Z-values and points if (ptNum != 0) cc[ptNum].z1 = x[spNum * rows + (ptNum - 1)]; cc[ptNum].p1 = pts[spNum * (numPoints + 1) + ptNum]; cc[ptNum].p2 = pts[spNum * (numPoints + 1) + (ptNum + 1)]; if (ptNum != rows) cc[ptNum].z2 = x[spNum * rows + ptNum]; } /** * Fills matrix A with coefficients of equations (where Z-values are vars) * and fills vector b with the right-hand-sides of the linear equation. * The length of pts should be 2 more than blockDim.x. * GridDim.x should be the number of matrices to fill. */ __global__ void fillA(float *A, Point *pts) { int j = 0; int pos = threadIdx.x; int rows = blockDim.x; // Initialize h values: h-sub-i is the distance between point i+1 and point // i on the x-axis. if (pos == rows - 1) h[pos + 1] = pts[pos + 2].x - pts[pos + 1].x; h[pos] = pts[pos + 1].x - pts[pos].x; __syncthreads(); j = (pos % rows == 0) ? 0 : pos - 1; // If we're not on row 0, fill the element left of the diagonal with h[pos] if (pos != 0) A[pos * rows + j++] = h[pos]; // For every row, fill the diagonal element A[pos * rows + j++] = 2*(h[pos + 1] + h[pos]); // If we're not on the last row, fill the element right of the diag. if (pos != rows - 1) A[pos * rows + j++] = h[pos + 1]; } __global__ void fillb(float *b, Point *pts) { int pos = blockIdx.x * blockDim.x + threadIdx.x, x = threadIdx.x; int ptpos = blockIdx.x * (blockDim.x + 2) + threadIdx.x; int rows = blockDim.x; // Initialize h values: h-sub-i is the distance between point i+1 and point // i on the x-axis. if (x == rows - 1) h[x + 1] = pts[ptpos + 2].x - pts[ptpos + 1].x; h[x] = pts[ptpos + 1].x - pts[ptpos].x; __syncthreads(); // Fill b with this formula. if (x < rows) b[pos] = 6*((pts[ptpos + 2].y - pts[ptpos + 1].y) / h[x + 1] - (pts[ptpos + 1].y - pts[ptpos].y) / h[x]); } /** * Takes the cubic spline described by cc and solves it. * blockDim.x indicates the granularity of the answer per curve. * gridDim.x indicates the number of curves to solve. * * Makes blockDim.x - 1 points between each two sets of curves. 
*/ __global__ void solveSplineCu(CubicCurve *cc, Point *ans) { int idx = blockIdx.x * blockDim.x + threadIdx.x; int cc_num = blockIdx.x; int x_num = threadIdx.x; CubicCurve cv = cc[cc_num]; float h = cv.p2.x - cv.p1.x; float x = cv.p1.x + (h / blockDim.x) * ((float) x_num); float fx1 = x - cv.p1.x; float fx2 = cv.p2.x - x; ans[idx].x = x; ans[idx].y = (cv.z2 / (6.0*h)) * (fx1 * fx1 * fx1) + (cv.z1 / (6.0*h)) * (fx2 * fx2 * fx2) + (cv.p2.y / h - (h / 6.0) * cv.z2) * fx1 + (cv.p1.y / h - (h / 6.0) * cv.z1) * fx2; }
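fillA and fillb above assemble the natural-cubic-spline system for the second derivatives z: for each interior point i, h[i-1]*z[i-1] + 2*(h[i-1]+h[i])*z[i] + h[i]*z[i+1] = 6*((y[i+1]-y[i])/h[i] - (y[i]-y[i-1])/h[i-1]), with z[0] = z[n-1] = 0. A host-side cross-check — plain C++ with illustrative data, not part of the library — that builds the same tridiagonal system:

#include <cstdio>
#include <vector>

int main() {
  std::vector<float> x = {0.f, 1.f, 2.5f, 4.f};          // 4 points -> rows = 2 interior equations
  std::vector<float> y = {0.f, 1.f, 0.f, 2.f};
  int n = (int)x.size(), rows = n - 2;
  std::vector<float> h(n - 1), A(rows * rows, 0.f), b(rows);
  for (int i = 0; i < n - 1; ++i) h[i] = x[i + 1] - x[i];
  for (int i = 0; i < rows; ++i) {
    if (i > 0)        A[i * rows + i - 1] = h[i];        // sub-diagonal, as in fillA
    A[i * rows + i] = 2.f * (h[i] + h[i + 1]);           // diagonal
    if (i < rows - 1) A[i * rows + i + 1] = h[i + 1];    // super-diagonal
    b[i] = 6.f * ((y[i + 2] - y[i + 1]) / h[i + 1] - (y[i + 1] - y[i]) / h[i]);  // as in fillb
  }
  // rows == 2 for this 4-point example, so each row has two entries.
  for (int i = 0; i < rows; ++i)
    printf("row %d: [%6.2f %6.2f]  b = %6.2f\n", i, A[i * rows], A[i * rows + 1], b[i]);
  return 0;
}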
93ab64f62036574cbdaa12c478b92bb299617646.hip
// !!! This is a file automatically generated by hipify!!!
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
        2, 4, 16, false, cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        hipStream_t stream);
#pragma GCC diagnostic pop
#endif
93ab64f62036574cbdaa12c478b92bb299617646.cu
#if !MEGDNN_TEGRA_X1
// generated by gen_cuda_conv_bias_kern_impls.py
// ignore warning of cutlass
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wunused-parameter"
#pragma GCC diagnostic ignored "-Wstrict-aliasing"
#include "src/cuda/conv_bias/int8/conv_bias_int8_implicit_gemm_cutlass_wrapper.cuinl"

using LayoutSrc = cutlass::layout::TensorNCxHWx<4>;
using LayoutFilter = cutlass::layout::TensorCxRSKx<4>;
using LayoutDst = cutlass::layout::TensorNCxHWx<4>;
using ThreadBlockShape = cutlass::gemm::GemmShape<32, 32, 32>;
using WarpShape = cutlass::gemm::GemmShape<32, 32, 32>;
using InstructionShape = cutlass::gemm::GemmShape<1, 1, 4>;
using EpilogueOp = cutlass::epilogue::thread::BiasAddLinearCombinationHSwishClamp<
        int8_t, 4, int32_t, int32_t, float>;
using Convolution = cutlass::conv::device::Convolution<
        int8_t, LayoutSrc, int8_t, LayoutFilter, int8_t, LayoutDst, int32_t,
        LayoutDst, int32_t, cutlass::conv::ConvType::kConvolution,
        cutlass::arch::OpClassSimt, cutlass::arch::Sm61, ThreadBlockShape,
        WarpShape, InstructionShape, EpilogueOp,
        cutlass::conv::threadblock::ConvolutionFpropNCxHWxThreadblockSwizzle,
        2, 4, 16, false, cutlass::arch::OpMultiplyAddSaturate>;
template void megdnn::cuda::cutlass_wrapper::cutlass_convolution_wrapper<Convolution>(
        const typename Convolution::ElementSrc* d_src,
        const typename Convolution::ElementFilter* d_filter,
        const typename Convolution::ElementBias* d_bias,
        const typename Convolution::ElementDst* d_z,
        typename Convolution::ElementDst* d_dst, int* workspace,
        typename Convolution::ConvolutionParameter const& conv_param,
        typename Convolution::EpilogueOutputOp::Params const& epilogue,
        cudaStream_t stream);
#pragma GCC diagnostic pop
#endif
9ad43d9b132fe61aba2650ee4563c4fc0b17ac60.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Adapted from interp.cpp from Caffe util by Pauline Luc // Originally developed by George Papandreou #include <THHUNN/THHUNN.h> #include <THH/THHTensor.hpp> #include <THHUNN/common.h> #include <THHUNN/linear_upsampling.h> #include <THH/THHDeviceTensor.cuh> #include <THH/THHDeviceTensorUtils.cuh> #include <THH/THHDeviceUtils.cuh> #include <TH/THHalf.h> #include <THHUNN/THHHalfAutoNumerics.cuh> #include <THH/THHAtomics.cuh> template<typename Dtype, typename Acctype> __global__ void caffe_gpu_interp2_kernel(const int n, const Acctype rheight, const Acctype rwidth, const bool align_corners, const THCDeviceTensor<Dtype, 4> data1, THCDeviceTensor<Dtype, 4> data2) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = data1.getSize(0); const int channels = data1.getSize(1); const int height1 = data1.getSize(2); const int width1 = data1.getSize(3); const int height2 = data2.getSize(2); const int width2 = data2.getSize(3); if (index < n) { const int w2 = index % width2; // 0:width2-1 const int h2 = index / width2; // 0:height2-1 // special case: just copy if (height1 == height2 && width1 == width2) { const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize ; n++){ for (int c = 0; c < channels; ++c) { const Dtype val = data1[n][c][h1][w1]; data2[n][c][h2][w2] = val; } } return; } // const Acctype h1r = linear_upsampling_compute_source_index<Acctype>(rheight, h2, align_corners); const int h1 = h1r; const int h1p = (h1 < height1 - 1) ? 1 : 0; const Acctype h1lambda = h1r - h1; const Acctype h0lambda = Acctype(1) - h1lambda; // const Acctype w1r = linear_upsampling_compute_source_index<Acctype>(rwidth, w2, align_corners); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 1 : 0; const Acctype w1lambda = w1r - w1; const Acctype w0lambda = Acctype(1) - w1lambda; // for (int n = 0; n < batchsize ; n++){ for (int c = 0; c < channels; ++c) { const Acctype val = h0lambda * (w0lambda * data1[n][c][h1][w1] + w1lambda * data1[n][c][h1][w1+w1p]) + h1lambda * (w0lambda * data1[n][c][h1+h1p][w1] + w1lambda * data1[n][c][h1+h1p][w1+w1p]); data2[n][c][h2][w2] = ScalarConvert<Acctype, Dtype>::to(val); } } } } // Backward (adjoint) operation 1 <- 2 (accumulates) template <typename Dtype, typename Acctype> __global__ void caffe_gpu_interp2_kernel_backward(const int n, const Acctype rheight, const Acctype rwidth, const bool align_corners, THCDeviceTensor<Dtype, 4> data1, const THCDeviceTensor<Dtype, 4> data2){ int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = data1.getSize(0); const int channels = data1.getSize(1); const int height1 = data1.getSize(2); const int width1 = data1.getSize(3); const int height2 = data2.getSize(2); const int width2 = data2.getSize(3); if (index < n) { const int w2 = index % width2; // 0:width2-1 const int h2 = index / width2; // 0:height2-1 // special case: just copy if (height1 == height2 && width1 == width2) { const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize ; n++){ for (int c = 0; c < channels; ++c) { const Dtype val = data2[n][c][h1][w1]; data1[n][c][h2][w2] += val; } } return; } // const Acctype h1r = linear_upsampling_compute_source_index<Acctype>(rheight, h2, align_corners); const int h1 = h1r; const int h1p = (h1 < height1 - 1) ? 
1 : 0; const Acctype h1lambda = h1r - h1; const Acctype h0lambda = Acctype(1) - h1lambda; // const Acctype w1r = linear_upsampling_compute_source_index<Acctype>(rwidth, w2, align_corners); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 1 : 0; const Acctype w1lambda = w1r - w1; const Acctype w0lambda = Acctype(1) - w1lambda; // for (int n = 0; n < batchsize ; n++){ for (int c = 0; c < channels; ++c) { const Dtype d2val = data2[n][c][h2][w2]; atomicAdd(data1[n][c][h1][w1].data(), ScalarConvert<Acctype, Dtype>::to(h0lambda * w0lambda * d2val)); atomicAdd(data1[n][c][h1][w1+w1p].data(), ScalarConvert<Acctype, Dtype>::to(h0lambda * w1lambda * d2val)); atomicAdd(data1[n][c][h1+h1p][w1].data(), ScalarConvert<Acctype, Dtype>::to(h1lambda * w0lambda * d2val)); atomicAdd(data1[n][c][h1+h1p][w1+w1p].data(), ScalarConvert<Acctype, Dtype>::to(h1lambda * w1lambda * d2val)); } } } } #include <THHUNN/generic/SpatialUpSamplingBilinear.hip> #include <THH/THHGenerateFloatTypes.h>
9ad43d9b132fe61aba2650ee4563c4fc0b17ac60.cu
// Adapted from interp.cpp from Caffe util by Pauline Luc // Originally developed by George Papandreou #include <THCUNN/THCUNN.h> #include <THC/THCTensor.hpp> #include <THCUNN/common.h> #include <THCUNN/linear_upsampling.h> #include <THC/THCDeviceTensor.cuh> #include <THC/THCDeviceTensorUtils.cuh> #include <THC/THCDeviceUtils.cuh> #include <TH/THHalf.h> #include <THCUNN/THCHalfAutoNumerics.cuh> #include <THC/THCAtomics.cuh> template<typename Dtype, typename Acctype> __global__ void caffe_gpu_interp2_kernel(const int n, const Acctype rheight, const Acctype rwidth, const bool align_corners, const THCDeviceTensor<Dtype, 4> data1, THCDeviceTensor<Dtype, 4> data2) { int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = data1.getSize(0); const int channels = data1.getSize(1); const int height1 = data1.getSize(2); const int width1 = data1.getSize(3); const int height2 = data2.getSize(2); const int width2 = data2.getSize(3); if (index < n) { const int w2 = index % width2; // 0:width2-1 const int h2 = index / width2; // 0:height2-1 // special case: just copy if (height1 == height2 && width1 == width2) { const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize ; n++){ for (int c = 0; c < channels; ++c) { const Dtype val = data1[n][c][h1][w1]; data2[n][c][h2][w2] = val; } } return; } // const Acctype h1r = linear_upsampling_compute_source_index<Acctype>(rheight, h2, align_corners); const int h1 = h1r; const int h1p = (h1 < height1 - 1) ? 1 : 0; const Acctype h1lambda = h1r - h1; const Acctype h0lambda = Acctype(1) - h1lambda; // const Acctype w1r = linear_upsampling_compute_source_index<Acctype>(rwidth, w2, align_corners); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 1 : 0; const Acctype w1lambda = w1r - w1; const Acctype w0lambda = Acctype(1) - w1lambda; // for (int n = 0; n < batchsize ; n++){ for (int c = 0; c < channels; ++c) { const Acctype val = h0lambda * (w0lambda * data1[n][c][h1][w1] + w1lambda * data1[n][c][h1][w1+w1p]) + h1lambda * (w0lambda * data1[n][c][h1+h1p][w1] + w1lambda * data1[n][c][h1+h1p][w1+w1p]); data2[n][c][h2][w2] = ScalarConvert<Acctype, Dtype>::to(val); } } } } // Backward (adjoint) operation 1 <- 2 (accumulates) template <typename Dtype, typename Acctype> __global__ void caffe_gpu_interp2_kernel_backward(const int n, const Acctype rheight, const Acctype rwidth, const bool align_corners, THCDeviceTensor<Dtype, 4> data1, const THCDeviceTensor<Dtype, 4> data2){ int index = threadIdx.x + blockIdx.x * blockDim.x; const int batchsize = data1.getSize(0); const int channels = data1.getSize(1); const int height1 = data1.getSize(2); const int width1 = data1.getSize(3); const int height2 = data2.getSize(2); const int width2 = data2.getSize(3); if (index < n) { const int w2 = index % width2; // 0:width2-1 const int h2 = index / width2; // 0:height2-1 // special case: just copy if (height1 == height2 && width1 == width2) { const int h1 = h2; const int w1 = w2; for (int n = 0; n < batchsize ; n++){ for (int c = 0; c < channels; ++c) { const Dtype val = data2[n][c][h1][w1]; data1[n][c][h2][w2] += val; } } return; } // const Acctype h1r = linear_upsampling_compute_source_index<Acctype>(rheight, h2, align_corners); const int h1 = h1r; const int h1p = (h1 < height1 - 1) ? 1 : 0; const Acctype h1lambda = h1r - h1; const Acctype h0lambda = Acctype(1) - h1lambda; // const Acctype w1r = linear_upsampling_compute_source_index<Acctype>(rwidth, w2, align_corners); const int w1 = w1r; const int w1p = (w1 < width1 - 1) ? 
1 : 0; const Acctype w1lambda = w1r - w1; const Acctype w0lambda = Acctype(1) - w1lambda; // for (int n = 0; n < batchsize ; n++){ for (int c = 0; c < channels; ++c) { const Dtype d2val = data2[n][c][h2][w2]; atomicAdd(data1[n][c][h1][w1].data(), ScalarConvert<Acctype, Dtype>::to(h0lambda * w0lambda * d2val)); atomicAdd(data1[n][c][h1][w1+w1p].data(), ScalarConvert<Acctype, Dtype>::to(h0lambda * w1lambda * d2val)); atomicAdd(data1[n][c][h1+h1p][w1].data(), ScalarConvert<Acctype, Dtype>::to(h1lambda * w0lambda * d2val)); atomicAdd(data1[n][c][h1+h1p][w1+w1p].data(), ScalarConvert<Acctype, Dtype>::to(h1lambda * w1lambda * d2val)); } } } } #include <THCUNN/generic/SpatialUpSamplingBilinear.cu> #include <THC/THCGenerateFloatTypes.h>
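linear_upsampling_compute_source_index() comes from linear_upsampling.h, which is not reproduced in this pair. A hedged sketch of the destination-to-source mapping such helpers conventionally implement (treat it as an assumption about the header, not its verbatim contents):

// Sketch of the usual align_corners convention for bilinear upsampling.
template <typename Acctype>
__host__ __device__ __forceinline__
Acctype compute_source_index_sketch(Acctype scale, int dst_index, bool align_corners) {
  if (align_corners) {
    // Corners map exactly onto corners: src = scale * dst, scale = (in-1)/(out-1).
    return scale * dst_index;
  }
  // Pixel-center convention: src = scale*(dst+0.5)-0.5, clamped at 0, scale = in/out.
  Acctype src = scale * (dst_index + Acctype(0.5)) - Acctype(0.5);
  return src < Acctype(0) ? Acctype(0) : src;
}
// e.g. h1r would be compute_source_index_sketch(rheight, h2, align_corners)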
9281d38b9a1c3a224c4160c4ba2b7659e0754bde.hip
// !!! This is a file automatically generated by hipify!!!
// Listing 5.1: dd_1d_global/main.cpp
#include <iostream>
#include <fstream>
#include <hip/hip_runtime.h>

#define TPB 64 // threads per block

__global__ void ddKernel(float *d_out, const float *d_in, int size, float h) {
  // on device, and hence do not have access to CPU memory
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= size) return;
  if (i == 0 || i == size-1) {
    d_out[i] = 0;
    return;
  }
  d_out[i] = (d_in[i+1] + d_in[i-1] - 2.0f*d_in[i])/(h*h);
}

void ddParallel(float *out, const float *in, int n, float h) {
  // create device memory
  float *d_out = 0;
  float *d_in = 0;
  hipMalloc(&d_out, n*sizeof(float));
  hipMalloc(&d_in, n*sizeof(float));
  hipMemcpy(d_in, in, n*sizeof(float), hipMemcpyHostToDevice);

  // call ddKernel
  hipLaunchKernelGGL(( ddKernel), dim3((n+TPB-1)/TPB), dim3(TPB), 0, 0, d_out, d_in, n, h);

  hipMemcpy(out, d_out, n*sizeof(float), hipMemcpyDeviceToHost);
  hipFree(d_out);
  hipFree(d_in);
}

int main() {
  const float PI = 3.1415926;
  const int N = 150;
  const float h = 2*PI/N;
  //
  float x[N] = {0.0f};
  float u[N] = {0.0f};
  float result_parallel[N] = {0.0f};

  // initialize x & u
  for (int i = 0; i < N; i++) {
    x[i] = i * (2 * PI / N);
    u[i] = sinf(x[i]);
  }

  ddParallel(result_parallel, u, N, h);

  std::ofstream outfile;
  outfile.open("results.csv");
  // x[i]   u[i]   d2u/d2x[i]   u[i] + d2u/d2x[i]
  // u = sin(x)   d2u/d2x = -sin(x)   u + d2u/d2x = 0.0
  for (int i = 0; i < N; i++) {
    outfile << x[i] << ", " << u[i] << ", " << result_parallel[i]
            << ", " << result_parallel[i] + u[i] << "\n";
  }
  outfile.close();

  std::cout << "dd_1d_global\n";
}
9281d38b9a1c3a224c4160c4ba2b7659e0754bde.cu
// Listing 5.1: dd_1d_global/main.cpp
#include <iostream>
#include <fstream>
#include <cuda_runtime.h>

#define TPB 64 // threads per block

__global__ void ddKernel(float *d_out, const float *d_in, int size, float h) {
  // on device, and hence do not have access to CPU memory
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= size) return;
  if (i == 0 || i == size-1) {
    d_out[i] = 0;
    return;
  }
  d_out[i] = (d_in[i+1] + d_in[i-1] - 2.0f*d_in[i])/(h*h);
}

void ddParallel(float *out, const float *in, int n, float h) {
  // create device memory
  float *d_out = 0;
  float *d_in = 0;
  cudaMalloc(&d_out, n*sizeof(float));
  cudaMalloc(&d_in, n*sizeof(float));
  cudaMemcpy(d_in, in, n*sizeof(float), cudaMemcpyHostToDevice);

  // call ddKernel
  ddKernel<<<(n+TPB-1)/TPB, TPB>>>(d_out, d_in, n, h);

  cudaMemcpy(out, d_out, n*sizeof(float), cudaMemcpyDeviceToHost);
  cudaFree(d_out);
  cudaFree(d_in);
}

int main() {
  const float PI = 3.1415926;
  const int N = 150;
  const float h = 2*PI/N;
  //
  float x[N] = {0.0f};
  float u[N] = {0.0f};
  float result_parallel[N] = {0.0f};

  // initialize x & u
  for (int i = 0; i < N; i++) {
    x[i] = i * (2 * PI / N);
    u[i] = sinf(x[i]);
  }

  ddParallel(result_parallel, u, N, h);

  std::ofstream outfile;
  outfile.open("results.csv");
  // x[i]   u[i]   d2u/d2x[i]   u[i] + d2u/d2x[i]
  // u = sin(x)   d2u/d2x = -sin(x)   u + d2u/d2x = 0.0
  for (int i = 0; i < N; i++) {
    outfile << x[i] << ", " << u[i] << ", " << result_parallel[i]
            << ", " << result_parallel[i] + u[i] << "\n";
  }
  outfile.close();

  std::cout << "dd_1d_global\n";
}
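A host-side reference for cross-checking ddKernel — the same centred second difference, d2u/dx2 ~= (u[i+1] + u[i-1] - 2*u[i]) / (h*h), applied serially (not part of the listing):

#include <math.h>
#include <cstdio>

void ddSerial(float *out, const float *in, int n, float h) {
  out[0] = out[n - 1] = 0.0f;                    // boundary values pinned to zero, as in the kernel
  for (int i = 1; i < n - 1; ++i)
    out[i] = (in[i + 1] + in[i - 1] - 2.0f * in[i]) / (h * h);
}

int main() {
  const int N = 150;
  const float PI = 3.1415926f, h = 2 * PI / N;
  float u[N], ref[N];
  for (int i = 0; i < N; ++i) u[i] = sinf(i * h);
  ddSerial(ref, u, N, h);
  // For u = sin(x) the exact second derivative is -sin(x), so u + u'' should be ~0.
  printf("u[10] + d2u/dx2[10] = %g\n", u[10] + ref[10]);
  return 0;
}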
61bb52c312d67907b70152428e0ee618dac25c01.hip
// !!! This is a file automatically generated by hipify!!! /* hw3 @author Ehsan MohyedinKermani */ #include <stdio.h> #include <math.h> #include <hip/hip_runtime.h> #include <hiprand/hiprand_kernel.h> #include "time_it.h" #define LOGISTIC 1 #define NORM 2 #define RND 3 static void HandleError( hipError_t err, const char *file, int line ) { if (err != hipSuccess) { printf( "%s in %s at line %d\n", hipGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) // struct for passing arguments through time_it_run to our kernel functions in Q1, Q2 struct kernel_arg { float *x, *result; uint n, m; int nblks, tpb; }; /* Question 1*/ __global__ void logistic(float *x, uint n, uint m) { uint i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n) { float val = x[i]; for(int iter = 0; iter < m; iter+=64) // unrolling for loop 64 times val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); } } void do_logistic(void *void_args) { struct kernel_arg *argk = (struct kernel_arg *)(void_args); hipLaunchKernelGGL(( logistic), dim3(argk->nblks), dim3(argk->tpb), 0, 0, argk->x, argk->n, argk->m); HANDLE_ERROR(hipDeviceSynchronize()); hipLaunchKernelGGL(( logistic), dim3(argk->nblks), dim3(argk->tpb), 0, 0, argk->x, argk->n, argk->m); HANDLE_ERROR(hipDeviceSynchronize()); hipLaunchKernelGGL(( logistic), dim3(argk->nblks), dim3(argk->tpb), 0, 0, argk->x, argk->n, argk->m); HANDLE_ERROR(hipDeviceSynchronize()); 
hipLaunchKernelGGL(( logistic), dim3(argk->nblks), dim3(argk->tpb), 0, 0, argk->x, argk->n, argk->m); HANDLE_ERROR(hipDeviceSynchronize()); } void time_logistic(uint n, int m, int tpb, int ntrials) { int nblks = n / tpb; uint size = n * sizeof(float); float *x; float *dev_x; x = (float *)malloc(size); // initialize x x[0] = 0.123f; for(int i = 1; i < n; i++) x[i] = 3.9f * x[i - 1] * (1.0f - x[i - 1]); HANDLE_ERROR(hipMalloc((void**)(&dev_x), size)); HANDLE_ERROR(hipMemcpy(dev_x, x, size, hipMemcpyHostToDevice)); struct kernel_arg argk; struct time_it_raw *tr = time_it_create(ntrials); struct time_it_stats stats; // initialize argk argk.n = n; argk.x = dev_x; argk.m = m; argk.nblks = nblks; argk.tpb = tpb; // run the kernel and report timing info time_it_run(tr, do_logistic, (void *)(&argk)); time_it_get_stats(tr, &stats); HANDLE_ERROR(hipMemcpy(x, dev_x, size, hipMemcpyDeviceToHost)); printf("Time logistic: mean(T) = %10.3e, std(T) = %10.3e\n", stats.mean, stats.std); printf("Number of GFLOPS is %10.3e\n", (3*m*n/stats.mean)/1e9); free(x); HANDLE_ERROR(hipFree(dev_x)); time_it_free(tr); } /* Question 2 */ #define NBLKS 2048 __shared__ float sdata[NBLKS]; __device__ void reduce_sum_dev(float *g_idata, float *g_odata, uint n) { // perform first level of reduction, // reading from global memory, writing to shared memory uint tid = threadIdx.x; uint i = blockIdx.x * blockDim.x * 2 + threadIdx.x; uint gridSize = blockDim.x * 2 * gridDim.x; float mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds if (i + blockDim.x < n) mySum += g_idata[i+blockDim.x]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
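/* note: the unrolled tree reduction in reduce_sum_dev assumes blockDim.x is a power of two and at most 512 threads; with larger or non-power-of-two block sizes some elements of sdata are never folded into sdata[0] (the callers in this file use tpb = 256) */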
volatile float* smem = sdata; if (blockDim.x >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockDim.x >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockDim.x >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockDim.x >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockDim.x >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockDim.x >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } __global__ void norm(float *x, float *result, uint n) { uint i = blockDim.x * blockIdx.x + threadIdx.x; if(i < n) x[i] = x[i] * x[i]; reduce_sum_dev(x, result, n); } void do_norm(void *void_args) { struct kernel_arg *argk = (struct kernel_arg *)(void_args); hipLaunchKernelGGL(( norm), dim3((argk->nblks + argk->tpb - 1)/argk->tpb), dim3(argk->tpb), 0, 0, argk->x, argk->result, argk->n); HANDLE_ERROR(hipDeviceSynchronize()); } void mem_norm(uint n, uint nblks, uint tpb, int ntrials) { uint size = n * sizeof(float); float *x, *result; float *dev_x, *dev_result; x = (float *)malloc(size); result = (float *)malloc(size); x[0] = 0.123f; for(int i = 1; i < n; i++) x[i] = x[0]; HANDLE_ERROR(hipMalloc((void**)(&dev_x), size)); HANDLE_ERROR(hipMalloc((void**)(&dev_result), size)); HANDLE_ERROR(hipMemcpy(dev_x, x, size, hipMemcpyHostToDevice)); HANDLE_ERROR(hipMemcpy(dev_result, result, size, hipMemcpyHostToDevice)); struct kernel_arg argk; struct time_it_raw *tr = time_it_create(ntrials); struct time_it_stats stats; // initialize argk argk.n = n; argk.x = dev_x; argk.result = dev_result; argk.nblks = nblks; argk.tpb = tpb; // run the kernel and report timing info time_it_run(tr, do_norm, (void *)(&argk)); time_it_get_stats(tr, &stats); HANDLE_ERROR(hipMemcpy(x, dev_x, size, hipMemcpyDeviceToHost)); HANDLE_ERROR(hipMemcpy(result, dev_result, size, hipMemcpyDeviceToHost)); printf("Time norm: mean(T) = %10.3e, std(T) = %10.3e\n", stats.mean, stats.std); printf("Memory-bandwidth is %10.3e, GB/second\n", (n*sizeof(float)/stats.mean)/1e9); free(x); free(result); HANDLE_ERROR(hipFree(dev_x)); HANDLE_ERROR(hipFree(dev_result)); time_it_free(tr); } /* Question 3*/ // struct for passing arguments to time_it_run in Q3 struct kernel_rarg { uint nblks, tpb, m, seed; uint *x; hiprandState_t *randState; }; // initialization __global__ void setup_kernel(uint seed, hiprandState_t *state) { uint myId = blockDim.x * blockIdx.x + threadIdx.x; seed = blockIdx.x; hiprand_init(seed, myId, 0, &state[myId]); } // random number generator __global__ void rndm(uint *x, uint m, hiprandState_t *randState) { uint myId = blockDim.x * blockIdx.x + threadIdx.x; hiprandState_t *myRandState = &(randState[myId]); for(int j = 0; j < m; j++) { x[myId] = (hiprand_uniform(myRandState) <= 0.5); } } void randNumGenerator(void *void_args) { struct kernel_rarg *argk = (struct kernel_rarg *)(void_args); hipLaunchKernelGGL(( rndm), dim3(argk->nblks), dim3(argk->tpb), 0, 0, argk->x, argk->m, argk->randState); HANDLE_ERROR(hipDeviceSynchronize()); } void time_randNumGenerator(uint nblks, uint tpb, uint m, int ntrials) { uint n = nblks * tpb * m; uint size = n * sizeof(uint); uint *x; uint *dev_x; x = (uint *)malloc(size); hiprandState_t *devState; HANDLE_ERROR(hipMalloc((void**)(&devState), n * sizeof(hiprandState_t))); HANDLE_ERROR(hipMalloc((void**)(&dev_x), size)); HANDLE_ERROR(hipMemcpy(dev_x, x, size, hipMemcpyHostToDevice)); struct kernel_rarg argk; struct time_it_raw *tr = time_it_create(ntrials); struct 
time_it_stats stats; // initialize argk argk.x = dev_x; argk.nblks = nblks; argk.tpb = tpb; argk.m = m; argk.randState = devState; // run the kernel and report timing info time_it_run(tr, randNumGenerator, (void *)(&argk)); time_it_get_stats(tr, &stats); HANDLE_ERROR(hipMemcpy(x, dev_x, size, hipMemcpyDeviceToHost)); printf("Time randNumGenerator: mean(T) = %10.3e, std(T) = %10.3e\n", stats.mean, stats.std); printf("Random number generation speed is %10.3e, random numbers per second\n", (n*sizeof(float)/stats.mean)); free(x); HANDLE_ERROR(hipFree(dev_x)); time_it_free(tr); } int main(int argc, char **argv) { uint what = atoi(argv[1]); hipDeviceProp_t prop; int ndev; HANDLE_ERROR(hipGetDeviceCount(&ndev)); if(ndev < 1) { fprintf(stderr, "No CUDA device found!\n"); exit(-1); } HANDLE_ERROR(hipGetDeviceProperties(&prop, 0)); int sharedMemPerBlock = prop.sharedMemPerBlock; int regsPerBlock = prop.regsPerBlock; printf("GPU is a %s supporting CUDA level %d.%d\n", prop.name, prop.major, prop.minor); printf("It has %d SMs and a warp size of %d\n", prop.multiProcessorCount, prop.warpSize); printf("sharedMemPerBlock = %d, regsPerBlock = %d\n", sharedMemPerBlock, regsPerBlock); printf("clock rate = %d\n", prop.clockRate); switch(what) { case LOGISTIC: time_logistic(61440, 6400, 256, 10); break; case NORM: mem_norm(pow(2,26), 6144*2, 256, 10); break; case RND: time_randNumGenerator(1024, 512, 4, 10); break; default: fprintf(stderr, "ERROR: unknown test case -- %d\n", what); exit(-1); } exit(0); }
61bb52c312d67907b70152428e0ee618dac25c01.cu
/* hw3 @author Ehsan MohyedinKermani */ #include <stdio.h> #include <math.h> #include <cuda.h> #include <curand_kernel.h> #include "time_it.h" #define LOGISTIC 1 #define NORM 2 #define RND 3 static void HandleError( cudaError_t err, const char *file, int line ) { if (err != cudaSuccess) { printf( "%s in %s at line %d\n", cudaGetErrorString( err ), file, line ); exit( EXIT_FAILURE ); } } #define HANDLE_ERROR( err ) (HandleError( err, __FILE__, __LINE__ )) // struct for passing arguments through time_it_run to our kernel functions in Q1, Q2 struct kernel_arg { float *x, *result; uint n, m; int nblks, tpb; }; /* Question 1*/ __global__ void logistic(float *x, uint n, uint m) { uint i = blockIdx.x * blockDim.x + threadIdx.x; if(i < n) { float val = x[i]; for(int iter = 0; iter < m; iter+=64) // unrolling for loop 64 times val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); val = 3.9f * val * (1.0f - val); } } void do_logistic(void *void_args) { struct kernel_arg *argk = (struct kernel_arg *)(void_args); logistic<<<argk->nblks, argk->tpb>>>(argk->x, argk->n, argk->m); HANDLE_ERROR(cudaDeviceSynchronize()); logistic<<<argk->nblks, argk->tpb>>>(argk->x, argk->n, argk->m); HANDLE_ERROR(cudaDeviceSynchronize()); logistic<<<argk->nblks, argk->tpb>>>(argk->x, argk->n, argk->m); HANDLE_ERROR(cudaDeviceSynchronize()); logistic<<<argk->nblks, argk->tpb>>>(argk->x, argk->n, argk->m); HANDLE_ERROR(cudaDeviceSynchronize()); } void time_logistic(uint n, int m, int tpb, int ntrials) { int nblks = n / tpb; uint size = n * 
sizeof(float); float *x; float *dev_x; x = (float *)malloc(size); // initialize x x[0] = 0.123f; for(int i = 1; i < n; i++) x[i] = 3.9f * x[i - 1] * (1.0f - x[i - 1]); HANDLE_ERROR(cudaMalloc((void**)(&dev_x), size)); HANDLE_ERROR(cudaMemcpy(dev_x, x, size, cudaMemcpyHostToDevice)); struct kernel_arg argk; struct time_it_raw *tr = time_it_create(ntrials); struct time_it_stats stats; // initialize argk argk.n = n; argk.x = dev_x; argk.m = m; argk.nblks = nblks; argk.tpb = tpb; // run the kernel and report timing info time_it_run(tr, do_logistic, (void *)(&argk)); time_it_get_stats(tr, &stats); HANDLE_ERROR(cudaMemcpy(x, dev_x, size, cudaMemcpyDeviceToHost)); printf("Time logistic: mean(T) = %10.3e, std(T) = %10.3e\n", stats.mean, stats.std); printf("Number of GFLOPS is %10.3e\n", (3*m*n/stats.mean)/1e9); free(x); HANDLE_ERROR(cudaFree(dev_x)); time_it_free(tr); } /* Question 2 */ #define NBLKS 2048 __shared__ float sdata[NBLKS]; __device__ void reduce_sum_dev(float *g_idata, float *g_odata, uint n) { // perform first level of reduction, // reading from global memory, writing to shared memory uint tid = threadIdx.x; uint i = blockIdx.x * blockDim.x * 2 + threadIdx.x; uint gridSize = blockDim.x * 2 * gridDim.x; float mySum = 0; // we reduce multiple elements per thread. The number is determined by the // number of active thread blocks (via gridDim). More blocks will result // in a larger gridSize and therefore fewer elements per thread while (i < n) { mySum += g_idata[i]; // ensure we don't read out of bounds if (i + blockDim.x < n) mySum += g_idata[i+blockDim.x]; i += gridSize; } // each thread puts its local sum into shared memory sdata[tid] = mySum; __syncthreads(); // do reduction in shared mem if (blockDim.x >= 512) { if (tid < 256) { sdata[tid] = mySum = mySum + sdata[tid + 256]; } __syncthreads(); } if (blockDim.x >= 256) { if (tid < 128) { sdata[tid] = mySum = mySum + sdata[tid + 128]; } __syncthreads(); } if (blockDim.x >= 128) { if (tid < 64) { sdata[tid] = mySum = mySum + sdata[tid + 64]; } __syncthreads(); } if (tid < 32) { // now that we are using warp-synchronous programming (below) // we need to declare our shared memory volatile so that the compiler // doesn't reorder stores to it and induce incorrect behavior. 
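/* note: the unrolled tree reduction in reduce_sum_dev assumes blockDim.x is a power of two and at most 512 threads; with larger or non-power-of-two block sizes some elements of sdata are never folded into sdata[0] (the callers in this file use tpb = 256) */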
volatile float* smem = sdata; if (blockDim.x >= 64) { smem[tid] = mySum = mySum + smem[tid + 32]; } if (blockDim.x >= 32) { smem[tid] = mySum = mySum + smem[tid + 16]; } if (blockDim.x >= 16) { smem[tid] = mySum = mySum + smem[tid + 8]; } if (blockDim.x >= 8) { smem[tid] = mySum = mySum + smem[tid + 4]; } if (blockDim.x >= 4) { smem[tid] = mySum = mySum + smem[tid + 2]; } if (blockDim.x >= 2) { smem[tid] = mySum = mySum + smem[tid + 1]; } } // write result for this block to global mem if (tid == 0) g_odata[blockIdx.x] = sdata[0]; } __global__ void norm(float *x, float *result, uint n) { uint i = blockDim.x * blockIdx.x + threadIdx.x; if(i < n) x[i] = x[i] * x[i]; reduce_sum_dev(x, result, n); } void do_norm(void *void_args) { struct kernel_arg *argk = (struct kernel_arg *)(void_args); norm<<<(argk->nblks + argk->tpb - 1)/argk->tpb, argk->tpb>>>(argk->x, argk->result, argk->n); HANDLE_ERROR(cudaDeviceSynchronize()); } void mem_norm(uint n, uint nblks, uint tpb, int ntrials) { uint size = n * sizeof(float); float *x, *result; float *dev_x, *dev_result; x = (float *)malloc(size); result = (float *)malloc(size); x[0] = 0.123f; for(int i = 1; i < n; i++) x[i] = x[0]; HANDLE_ERROR(cudaMalloc((void**)(&dev_x), size)); HANDLE_ERROR(cudaMalloc((void**)(&dev_result), size)); HANDLE_ERROR(cudaMemcpy(dev_x, x, size, cudaMemcpyHostToDevice)); HANDLE_ERROR(cudaMemcpy(dev_result, result, size, cudaMemcpyHostToDevice)); struct kernel_arg argk; struct time_it_raw *tr = time_it_create(ntrials); struct time_it_stats stats; // initialize argk argk.n = n; argk.x = dev_x; argk.result = dev_result; argk.nblks = nblks; argk.tpb = tpb; // run the kernel and report timing info time_it_run(tr, do_norm, (void *)(&argk)); time_it_get_stats(tr, &stats); HANDLE_ERROR(cudaMemcpy(x, dev_x, size, cudaMemcpyDeviceToHost)); HANDLE_ERROR(cudaMemcpy(result, dev_result, size, cudaMemcpyDeviceToHost)); printf("Time norm: mean(T) = %10.3e, std(T) = %10.3e\n", stats.mean, stats.std); printf("Memory-bandwidth is %10.3e, GB/second\n", (n*sizeof(float)/stats.mean)/1e9); free(x); free(result); HANDLE_ERROR(cudaFree(dev_x)); HANDLE_ERROR(cudaFree(dev_result)); time_it_free(tr); } /* Question 3*/ // struct for passing arguments to time_it_run in Q3 struct kernel_rarg { uint nblks, tpb, m, seed; uint *x; curandState *randState; }; // initialization __global__ void setup_kernel(uint seed, curandState *state) { uint myId = blockDim.x * blockIdx.x + threadIdx.x; seed = blockIdx.x; curand_init(seed, myId, 0, &state[myId]); } // random number generator __global__ void rndm(uint *x, uint m, curandState *randState) { uint myId = blockDim.x * blockIdx.x + threadIdx.x; curandState *myRandState = &(randState[myId]); for(int j = 0; j < m; j++) { x[myId] = (curand_uniform(myRandState) <= 0.5); } } void randNumGenerator(void *void_args) { struct kernel_rarg *argk = (struct kernel_rarg *)(void_args); rndm<<<argk->nblks, argk->tpb>>>(argk->x, argk->m, argk->randState); HANDLE_ERROR(cudaDeviceSynchronize()); } void time_randNumGenerator(uint nblks, uint tpb, uint m, int ntrials) { uint n = nblks * tpb * m; uint size = n * sizeof(uint); uint *x; uint *dev_x; x = (uint *)malloc(size); curandState *devState; HANDLE_ERROR(cudaMalloc((void**)(&devState), n * sizeof(curandState))); HANDLE_ERROR(cudaMalloc((void**)(&dev_x), size)); HANDLE_ERROR(cudaMemcpy(dev_x, x, size, cudaMemcpyHostToDevice)); struct kernel_rarg argk; struct time_it_raw *tr = time_it_create(ntrials); struct time_it_stats stats; // initialize argk argk.x = dev_x; argk.nblks = nblks; argk.tpb 
= tpb; argk.m = m; argk.randState = devState; // run the kernel and report timing info time_it_run(tr, randNumGenerator, (void *)(&argk)); time_it_get_stats(tr, &stats); HANDLE_ERROR(cudaMemcpy(x, dev_x, size, cudaMemcpyDeviceToHost)); printf("Time randNumGenerator: mean(T) = %10.3e, std(T) = %10.3e\n", stats.mean, stats.std); printf("Random number generation speed is %10.3e, random numbers per second\n", (n*sizeof(float)/stats.mean)); free(x); HANDLE_ERROR(cudaFree(dev_x)); time_it_free(tr); } int main(int argc, char **argv) { uint what = atoi(argv[1]); cudaDeviceProp prop; int ndev; HANDLE_ERROR(cudaGetDeviceCount(&ndev)); if(ndev < 1) { fprintf(stderr, "No CUDA device found!\n"); exit(-1); } HANDLE_ERROR(cudaGetDeviceProperties(&prop, 0)); int sharedMemPerBlock = prop.sharedMemPerBlock; int regsPerBlock = prop.regsPerBlock; printf("GPU is a %s supporting CUDA level %d.%d\n", prop.name, prop.major, prop.minor); printf("It has %d SMs and a warp size of %d\n", prop.multiProcessorCount, prop.warpSize); printf("sharedMemPerBlock = %d, regsPerBlock = %d\n", sharedMemPerBlock, regsPerBlock); printf("clock rate = %d\n", prop.clockRate); switch(what) { case LOGISTIC: time_logistic(61440, 6400, 256, 10); break; case NORM: mem_norm(pow(2,26), 6144*2, 256, 10); break; case RND: time_randNumGenerator(1024, 512, 4, 10); break; default: fprintf(stderr, "ERROR: unknown test case -- %d\n", what); exit(-1); } exit(0); }
d50bc209fc85d28f1761d5b25f5118550ea70f15.hip
// !!! This is a file automatically generated by hipify!!! #ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCStorage.cu" #else void THCStorage_(fill)(THCState *state, THCStorage *self, real value) { thrust::device_ptr<real> self_data(self->data); thrust::fill( #if TORCH_HIP_VERSION >= 7000 thrust::hip::par.on(THCState_getCurrentStream(state)), #endif self_data, self_data+self->size, value); } void THCStorage_(resize)(THCState *state, THCStorage *self, ptrdiff_t size) { THArgCheck(size >= 0, 2, "invalid size"); THAssert(self->allocator != NULL); int device; THCudaCheck(hipGetDevice(&device)); if(!(self->flag & TH_STORAGE_RESIZABLE)) THError("Trying to resize storage that is not resizable"); if (self->allocator->realloc) { THCHeapUpdate(state, (size - self->size) * sizeof(real)); hipError_t err = (*self->allocator->realloc)( self->allocatorContext, (void**)&(self->data), self->size * sizeof(real), size * sizeof(real), THCState_getCurrentStream(state)); if (err != hipSuccess) { THCHeapUpdate(state, (self->size - size) * sizeof(real)); THCudaCheck(err); } self->size = size; self->device = device; return; } if(size == 0) { if(self->flag & TH_STORAGE_FREEMEM) { THCudaCheck( (*self->allocator->free)(self->allocatorContext, self->data)); THCHeapUpdate(state, -self->size * sizeof(real)); } self->data = NULL; self->size = 0; self->device = device; } else { real *data = NULL; // update heap *before* attempting malloc, to free space for the malloc THCHeapUpdate(state, size * sizeof(real)); hipError_t err = (*self->allocator->malloc)(self->allocatorContext, (void**)&(data), size * sizeof(real), THCState_getCurrentStream(state)); if(err != hipSuccess) { THCHeapUpdate(state, -size * sizeof(real)); } THCudaCheck(err); if (self->data) { // Enable p2p access when the memcpy is across devices THCState_getPeerToPeerAccess(state, device, self->device); THCudaCheck(hipMemcpyAsync(data, self->data, THMin(self->size, size) * sizeof(real), hipMemcpyDeviceToDevice, THCState_getCurrentStream(state))); if(self->flag & TH_STORAGE_FREEMEM) { THCudaCheck( (*self->allocator->free)(self->allocatorContext, self->data)); THCHeapUpdate(state, -self->size * sizeof(real)); } } self->data = data; self->size = size; self->device = device; } } THC_API int THCStorage_(getDevice)(THCState* state, const THCStorage* storage) { return storage->device; } #endif
d50bc209fc85d28f1761d5b25f5118550ea70f15.cu
#ifndef THC_GENERIC_FILE #define THC_GENERIC_FILE "generic/THCStorage.cu" #else void THCStorage_(fill)(THCState *state, THCStorage *self, real value) { thrust::device_ptr<real> self_data(self->data); thrust::fill( #if CUDA_VERSION >= 7000 thrust::cuda::par.on(THCState_getCurrentStream(state)), #endif self_data, self_data+self->size, value); } void THCStorage_(resize)(THCState *state, THCStorage *self, ptrdiff_t size) { THArgCheck(size >= 0, 2, "invalid size"); THAssert(self->allocator != NULL); int device; THCudaCheck(cudaGetDevice(&device)); if(!(self->flag & TH_STORAGE_RESIZABLE)) THError("Trying to resize storage that is not resizable"); if (self->allocator->realloc) { THCHeapUpdate(state, (size - self->size) * sizeof(real)); cudaError_t err = (*self->allocator->realloc)( self->allocatorContext, (void**)&(self->data), self->size * sizeof(real), size * sizeof(real), THCState_getCurrentStream(state)); if (err != cudaSuccess) { THCHeapUpdate(state, (self->size - size) * sizeof(real)); THCudaCheck(err); } self->size = size; self->device = device; return; } if(size == 0) { if(self->flag & TH_STORAGE_FREEMEM) { THCudaCheck( (*self->allocator->free)(self->allocatorContext, self->data)); THCHeapUpdate(state, -self->size * sizeof(real)); } self->data = NULL; self->size = 0; self->device = device; } else { real *data = NULL; // update heap *before* attempting malloc, to free space for the malloc THCHeapUpdate(state, size * sizeof(real)); cudaError_t err = (*self->allocator->malloc)(self->allocatorContext, (void**)&(data), size * sizeof(real), THCState_getCurrentStream(state)); if(err != cudaSuccess) { THCHeapUpdate(state, -size * sizeof(real)); } THCudaCheck(err); if (self->data) { // Enable p2p access when the memcpy is across devices THCState_getPeerToPeerAccess(state, device, self->device); THCudaCheck(cudaMemcpyAsync(data, self->data, THMin(self->size, size) * sizeof(real), cudaMemcpyDeviceToDevice, THCState_getCurrentStream(state))); if(self->flag & TH_STORAGE_FREEMEM) { THCudaCheck( (*self->allocator->free)(self->allocatorContext, self->data)); THCHeapUpdate(state, -self->size * sizeof(real)); } } self->data = data; self->size = size; self->device = device; } } THC_API int THCStorage_(getDevice)(THCState* state, const THCStorage* storage) { return storage->device; } #endif
d689bac422ddfc2475b54fac882046199a752876.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "devicetodevicecopy.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *dphi = NULL; hipMalloc(&dphi, XSIZE*YSIZE); double *dpsix = NULL; hipMalloc(&dpsix, XSIZE*YSIZE); double *dpsiy = NULL; hipMalloc(&dpsiy, XSIZE*YSIZE); double *mphi = NULL; hipMalloc(&mphi, XSIZE*YSIZE); double *mpsix = NULL; hipMalloc(&mpsix, XSIZE*YSIZE); double *mpsiy = NULL; hipMalloc(&mpsiy, XSIZE*YSIZE); unsigned int nx = 1; unsigned int TileSize = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( devicetodevicecopy), dim3(gridBlock),dim3(threadBlock), 0, 0, dphi,dpsix,dpsiy,mphi,mpsix,mpsiy,nx,TileSize); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( devicetodevicecopy), dim3(gridBlock),dim3(threadBlock), 0, 0, dphi,dpsix,dpsiy,mphi,mpsix,mpsiy,nx,TileSize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( devicetodevicecopy), dim3(gridBlock),dim3(threadBlock), 0, 0, dphi,dpsix,dpsiy,mphi,mpsix,mpsiy,nx,TileSize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
d689bac422ddfc2475b54fac882046199a752876.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "devicetodevicecopy.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; double *dphi = NULL; cudaMalloc(&dphi, XSIZE*YSIZE); double *dpsix = NULL; cudaMalloc(&dpsix, XSIZE*YSIZE); double *dpsiy = NULL; cudaMalloc(&dpsiy, XSIZE*YSIZE); double *mphi = NULL; cudaMalloc(&mphi, XSIZE*YSIZE); double *mpsix = NULL; cudaMalloc(&mpsix, XSIZE*YSIZE); double *mpsiy = NULL; cudaMalloc(&mpsiy, XSIZE*YSIZE); unsigned int nx = 1; unsigned int TileSize = 1; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); devicetodevicecopy<<<gridBlock,threadBlock>>>(dphi,dpsix,dpsiy,mphi,mpsix,mpsiy,nx,TileSize); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { devicetodevicecopy<<<gridBlock,threadBlock>>>(dphi,dpsix,dpsiy,mphi,mpsix,mpsiy,nx,TileSize); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { devicetodevicecopy<<<gridBlock,threadBlock>>>(dphi,dpsix,dpsiy,mphi,mpsix,mpsiy,nx,TileSize); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
ddacdca293fc71f460ea48d313c80bdc53db09b3.hip
// !!! This is a file automatically generated by hipify!!! /* * * Authors: Fernando Amat * test_gpu_elementwiseOp.cpp * * Created on : June 5th, 2015 * Author : Fernando Amat * * \brief testing GPU kernels to perform pointwise operations * */ #include <cstdint> #include <stdlib.h> /* srand, rand */ #include <time.h> /* time */ #include <iostream> #include <algorithm> #include <fstream> #include "commonCUDA.h" #include "hip/hip_runtime.h" #include "book.h" #include "multiviewDeconvolution.h" typedef float dataType; using namespace std; int main(int argc, const char** argv) { std::cout << "testing GPU convolution kernel in the GPU running..." << std::endl; int devCUDA = 0; //parameters string filepath("C:/Users/Fernando/matlabProjects/deconvolution/CUDA/test/data/"); if (argc > 1) filepath = string(argv[1]); string filePatternPSF(filepath + "psfReg_?.klb"); string filePatternImg(filepath + "imReg_?.klb"); int numViews = 1; //===================================================================== HANDLE_ERROR(hipSetDevice(devCUDA)); //declare object multiviewDeconvolution<float> *J; J = new multiviewDeconvolution<float>; //set number of views J->setNumberOfViews(numViews); //read images string filename; int err; for (int ii = 0; ii < numViews; ii++) { filename = multiviewImage<float>::recoverFilenamePatternFromString(filePatternPSF, ii + 1); err = J->readImage(filename, ii, std::string("psf"));//this function should just read image if (err > 0) { cout << "ERROR: reading file " << filename << endl; return err; } filename = multiviewImage<float>::recoverFilenamePatternFromString(filePatternImg, ii + 1); err = J->readImage(filename, ii, std::string("img")); if (err > 0) { cout << "ERROR: reading file " << filename << endl; return err; } float* imConv = J->convolution3DfftCUDA_img_psf(ii, devCUDA); //write file char fileout[256]; sprintf(fileout, "%sout_test_convPSFimg_view%d.raw", filepath.c_str(), ii+1); ofstream fid(fileout, ios::binary); fid.write((char*)imConv, J->numElements_img(ii) * sizeof(float)); fid.close(); cout << "Convolution results written successfully at " << fileout << endl; delete[] imConv; } delete J; //-------------------------------------------------- //second test std::cout << "testing GPU convolution kernel in the GPU running..." << std::endl; filePatternPSF = string(filepath + "psfReg_1.klb"); filePatternImg = string(filepath + "J_iter0000.klb"); J = new multiviewDeconvolution<float>; //set number of views J->setNumberOfViews(1); //read images err = J->readImage(filePatternPSF, 0, std::string("psf"));//this function should just read image if (err > 0) { cout << "ERROR: reading file " << filename << endl; return err; } err = J->readImage(filePatternImg, 0, std::string("img")); if (err > 0) { cout << "ERROR: reading file " << filename << endl; return err; } float* imConv = J->convolution3DfftCUDA_img_psf(0, devCUDA); //write file char fileout[256]; sprintf(fileout, "%sout_test_convPSF_Jiter_.raw", filepath.c_str()); ofstream fid(fileout, ios::binary); fid.write((char*)imConv, J->numElements_img(0) * sizeof(float)); fid.close(); cout << "Convolution results written successfully at " << fileout << endl; delete[] imConv; delete J; return 0; }
ddacdca293fc71f460ea48d313c80bdc53db09b3.cu
/* * * Authors: Fernando Amat * test_gpu_elementwiseOp.cpp * * Created on : June 5th, 2015 * Author : Fernando Amat * * \brief testing GPU kernels to perform pointwise operations * */ #include <cstdint> #include <stdlib.h> /* srand, rand */ #include <time.h> /* time */ #include <iostream> #include <algorithm> #include <fstream> #include "commonCUDA.h" #include "cuda.h" #include "book.h" #include "multiviewDeconvolution.h" typedef float dataType; using namespace std; int main(int argc, const char** argv) { std::cout << "testing GPU convolution kernel in the GPU running..." << std::endl; int devCUDA = 0; //parameters string filepath("C:/Users/Fernando/matlabProjects/deconvolution/CUDA/test/data/"); if (argc > 1) filepath = string(argv[1]); string filePatternPSF(filepath + "psfReg_?.klb"); string filePatternImg(filepath + "imReg_?.klb"); int numViews = 1; //===================================================================== HANDLE_ERROR(cudaSetDevice(devCUDA)); //declare object multiviewDeconvolution<float> *J; J = new multiviewDeconvolution<float>; //set number of views J->setNumberOfViews(numViews); //read images string filename; int err; for (int ii = 0; ii < numViews; ii++) { filename = multiviewImage<float>::recoverFilenamePatternFromString(filePatternPSF, ii + 1); err = J->readImage(filename, ii, std::string("psf"));//this function should just read image if (err > 0) { cout << "ERROR: reading file " << filename << endl; return err; } filename = multiviewImage<float>::recoverFilenamePatternFromString(filePatternImg, ii + 1); err = J->readImage(filename, ii, std::string("img")); if (err > 0) { cout << "ERROR: reading file " << filename << endl; return err; } float* imConv = J->convolution3DfftCUDA_img_psf(ii, devCUDA); //write file char fileout[256]; sprintf(fileout, "%sout_test_convPSFimg_view%d.raw", filepath.c_str(), ii+1); ofstream fid(fileout, ios::binary); fid.write((char*)imConv, J->numElements_img(ii) * sizeof(float)); fid.close(); cout << "Convolution results written successfully at " << fileout << endl; delete[] imConv; } delete J; //-------------------------------------------------- //second test std::cout << "testing GPU convolution kernel in the GPU running..." << std::endl; filePatternPSF = string(filepath + "psfReg_1.klb"); filePatternImg = string(filepath + "J_iter0000.klb"); J = new multiviewDeconvolution<float>; //set number of views J->setNumberOfViews(1); //read images err = J->readImage(filePatternPSF, 0, std::string("psf"));//this function should just read image if (err > 0) { cout << "ERROR: reading file " << filename << endl; return err; } err = J->readImage(filePatternImg, 0, std::string("img")); if (err > 0) { cout << "ERROR: reading file " << filename << endl; return err; } float* imConv = J->convolution3DfftCUDA_img_psf(0, devCUDA); //write file char fileout[256]; sprintf(fileout, "%sout_test_convPSF_Jiter_.raw", filepath.c_str()); ofstream fid(fileout, ios::binary); fid.write((char*)imConv, J->numElements_img(0) * sizeof(float)); fid.close(); cout << "Convolution results written successfully at " << fileout << endl; delete[] imConv; delete J; return 0; }
06bb2988d3197c8a4da0250b0353e8c61eb4a102.hip
// !!! This is a file automatically generated by hipify!!! /*! * Copyright 2018 by Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <xgboost/data.h> #include <xgboost/linear_updater.h> #include "../common/common.h" #include "../common/span.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "./param.h" #include "coordinate_common.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); void RescaleIndices(int device_idx, size_t ridx_begin, common::Span<xgboost::Entry> data) { dh::LaunchN(device_idx, data.size(), [=] __device__(size_t idx) { data[idx].index -= ridx_begin; }); } class DeviceShard { int device_id_; dh::BulkAllocator ba_; std::vector<size_t> row_ptr_; common::Span<xgboost::Entry> data_; common::Span<GradientPair> gpair_; dh::CubMemory temp_; size_t ridx_begin_; size_t ridx_end_; public: DeviceShard(int device_id, const SparsePage &batch, // column batch bst_uint row_begin, bst_uint row_end, const LinearTrainParam &param, const gbm::GBLinearModelParam &model_param) : device_id_(device_id), ridx_begin_(row_begin), ridx_end_(row_end) { if ( IsEmpty() ) { return; } dh::safe_cuda(hipSetDevice(device_id_)); // The begin and end indices for the section of each column associated with // this shard std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; // iterate through columns for (auto fidx = 0; fidx < batch.Size(); fidx++) { common::Span<Entry const> col = batch[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(row_begin, 0.0f), cmp); auto column_end = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(row_end, 0.0f), cmp); column_segments.emplace_back( std::make_pair(column_begin - col.cbegin(), column_end - col.cbegin())); row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin)); } ba_.Allocate(device_id_, &data_, row_ptr_.back(), &gpair_, (row_end - row_begin) * model_param.num_output_group); for (int fidx = 0; fidx < batch.Size(); fidx++) { auto col = batch[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(hipMemcpy( data_.subspan(row_ptr_[fidx]).data(), col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), hipMemcpyHostToDevice)); } // Rescale indices with respect to current shard RescaleIndices(device_id_, ridx_begin_, data_); } bool IsEmpty() { return (ridx_end_ - ridx_begin_) == 0; } void UpdateGpair(const std::vector<GradientPair> &host_gpair, const gbm::GBLinearModelParam &model_param) { dh::safe_cuda(hipMemcpyAsync( gpair_.data(), host_gpair.data() + ridx_begin_ * model_param.num_output_group, gpair_.size() * sizeof(GradientPair), hipMemcpyHostToDevice)); } GradientPair GetBiasGradient(int group_idx, int num_group) { dh::safe_cuda(hipSetDevice(device_id_)); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.data(), skip); return dh::SumReduction(temp_, perm, ridx_end_ - ridx_begin_); } void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = gpair_; dh::LaunchN(device_id_, ridx_end_ - ridx_begin_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * 
dbias, 0); }); } GradientPair GetGradient(int group_idx, int num_group, int fidx) { dh::safe_cuda(hipSetDevice(device_id_)); common::Span<xgboost::Entry> d_col = data_.subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; common::Span<GradientPair> d_gpair = gpair_; auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + group_idx]; return GradientPair(g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue); }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(temp_, multiply_iterator, col_size); } void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { common::Span<GradientPair> d_gpair = gpair_; common::Span<Entry> d_col = data_.subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(device_id_, col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } }; /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { public: // set training parameter void Init( const std::vector<std::pair<std::string, std::string>> &args) override { tparam_.InitAllowUnknown(args); selector_.reset(FeatureSelector::Create(tparam_.feature_selector)); monitor_.Init("GPUCoordinateUpdater"); } void LazyInitShards(DMatrix *p_fmat, const gbm::GBLinearModelParam &model_param) { if (!shards_.empty()) return; dist_ = GPUDistribution::Block(GPUSet::All(tparam_.gpu_id, tparam_.n_gpus, p_fmat->Info().num_row_)); auto devices = dist_.Devices(); size_t n_devices = static_cast<size_t>(devices.Size()); size_t row_begin = 0; size_t num_row = static_cast<size_t>(p_fmat->Info().num_row_); // Partition input matrix into row segments std::vector<size_t> row_segments; row_segments.push_back(0); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { size_t shard_size = dist_.ShardSize(num_row, d_idx); size_t row_end = row_begin + shard_size; row_segments.push_back(row_end); row_begin = row_end; } CHECK(p_fmat->SingleColBlock()); SparsePage const& batch = *(p_fmat->GetColumnBatches().begin()); shards_.resize(n_devices); // Create device shards dh::ExecuteIndexShards(&shards_, [&](int i, std::unique_ptr<DeviceShard>& shard) { shard = std::unique_ptr<DeviceShard>( new DeviceShard(devices.DeviceId(i), batch, row_segments[i], row_segments[i + 1], tparam_, model_param)); }); } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { tparam_.DenormalizePenalties(sum_instance_weight); monitor_.Start("LazyInitShards"); this->LazyInitShards(p_fmat, model->param); monitor_.Stop("LazyInitShards"); monitor_.Start("UpdateGpair"); auto &in_gpair_host = in_gpair->ConstHostVector(); // Update gpair dh::ExecuteIndexShards(&shards_, [&](int idx, std::unique_ptr<DeviceShard>& shard) { if (!shard->IsEmpty()) { shard->UpdateGpair(in_gpair_host, model->param); } }); monitor_.Stop("UpdateGpair"); monitor_.Start("UpdateBias"); this->UpdateBias(p_fmat, model); monitor_.Stop("UpdateBias"); // prepare for updating the weights selector_->Setup(*model, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm, coord_param_.top_k); 
monitor_.Start("UpdateFeature"); for (auto group_idx = 0; group_idx < model->param.num_output_group; ++group_idx) { for (auto i = 0U; i < model->param.num_feature; i++) { auto fidx = selector_->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm); if (fidx < 0) break; this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model); } } monitor_.Stop("UpdateFeature"); } void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) { for (int group_idx = 0; group_idx < model->param.num_output_group; ++group_idx) { // Get gradient auto grad = dh::ReduceShards<GradientPair>( &shards_, [&](std::unique_ptr<DeviceShard> &shard) { if (!shard->IsEmpty()) { GradientPair result = shard->GetBiasGradient(group_idx, model->param.num_output_group); return result; } return GradientPair(0, 0); }); auto dbias = static_cast<float>( tparam_.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->bias()[group_idx] += dbias; // Update residual dh::ExecuteIndexShards(&shards_, [&](int idx, std::unique_ptr<DeviceShard>& shard) { if (!shard->IsEmpty()) { shard->UpdateBiasResidual(dbias, group_idx, model->param.num_output_group); } }); } } void UpdateFeature(int fidx, int group_idx, std::vector<GradientPair> *in_gpair, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = dh::ReduceShards<GradientPair>( &shards_, [&](std::unique_ptr<DeviceShard> &shard) { if (!shard->IsEmpty()) { return shard->GetGradient(group_idx, model->param.num_output_group, fidx); } return GradientPair(0, 0); }); auto dw = static_cast<float>(tparam_.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm)); w += dw; dh::ExecuteIndexShards(&shards_, [&](int idx, std::unique_ptr<DeviceShard> &shard) { if (!shard->IsEmpty()) { shard->UpdateResidual(dw, group_idx, model->param.num_output_group, fidx); } }); } private: // training parameter LinearTrainParam tparam_; CoordinateParam coord_param_; GPUDistribution dist_; std::unique_ptr<FeatureSelector> selector_; common::Monitor monitor_; std::vector<std::unique_ptr<DeviceShard>> shards_; }; XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
06bb2988d3197c8a4da0250b0353e8c61eb4a102.cu
/*! * Copyright 2018 by Contributors * \author Rory Mitchell */ #include <thrust/execution_policy.h> #include <thrust/inner_product.h> #include <xgboost/data.h> #include <xgboost/linear_updater.h> #include "../common/common.h" #include "../common/span.h" #include "../common/device_helpers.cuh" #include "../common/timer.h" #include "./param.h" #include "coordinate_common.h" namespace xgboost { namespace linear { DMLC_REGISTRY_FILE_TAG(updater_gpu_coordinate); void RescaleIndices(int device_idx, size_t ridx_begin, common::Span<xgboost::Entry> data) { dh::LaunchN(device_idx, data.size(), [=] __device__(size_t idx) { data[idx].index -= ridx_begin; }); } class DeviceShard { int device_id_; dh::BulkAllocator ba_; std::vector<size_t> row_ptr_; common::Span<xgboost::Entry> data_; common::Span<GradientPair> gpair_; dh::CubMemory temp_; size_t ridx_begin_; size_t ridx_end_; public: DeviceShard(int device_id, const SparsePage &batch, // column batch bst_uint row_begin, bst_uint row_end, const LinearTrainParam &param, const gbm::GBLinearModelParam &model_param) : device_id_(device_id), ridx_begin_(row_begin), ridx_end_(row_end) { if ( IsEmpty() ) { return; } dh::safe_cuda(cudaSetDevice(device_id_)); // The begin and end indices for the section of each column associated with // this shard std::vector<std::pair<bst_uint, bst_uint>> column_segments; row_ptr_ = {0}; // iterate through columns for (auto fidx = 0; fidx < batch.Size(); fidx++) { common::Span<Entry const> col = batch[fidx]; auto cmp = [](Entry e1, Entry e2) { return e1.index < e2.index; }; auto column_begin = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(row_begin, 0.0f), cmp); auto column_end = std::lower_bound(col.cbegin(), col.cend(), xgboost::Entry(row_end, 0.0f), cmp); column_segments.emplace_back( std::make_pair(column_begin - col.cbegin(), column_end - col.cbegin())); row_ptr_.push_back(row_ptr_.back() + (column_end - column_begin)); } ba_.Allocate(device_id_, &data_, row_ptr_.back(), &gpair_, (row_end - row_begin) * model_param.num_output_group); for (int fidx = 0; fidx < batch.Size(); fidx++) { auto col = batch[fidx]; auto seg = column_segments[fidx]; dh::safe_cuda(cudaMemcpy( data_.subspan(row_ptr_[fidx]).data(), col.data() + seg.first, sizeof(Entry) * (seg.second - seg.first), cudaMemcpyHostToDevice)); } // Rescale indices with respect to current shard RescaleIndices(device_id_, ridx_begin_, data_); } bool IsEmpty() { return (ridx_end_ - ridx_begin_) == 0; } void UpdateGpair(const std::vector<GradientPair> &host_gpair, const gbm::GBLinearModelParam &model_param) { dh::safe_cuda(cudaMemcpyAsync( gpair_.data(), host_gpair.data() + ridx_begin_ * model_param.num_output_group, gpair_.size() * sizeof(GradientPair), cudaMemcpyHostToDevice)); } GradientPair GetBiasGradient(int group_idx, int num_group) { dh::safe_cuda(cudaSetDevice(device_id_)); auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { return idx * num_group + group_idx; }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), size_t> skip( counting, f); auto perm = thrust::make_permutation_iterator(gpair_.data(), skip); return dh::SumReduction(temp_, perm, ridx_end_ - ridx_begin_); } void UpdateBiasResidual(float dbias, int group_idx, int num_groups) { if (dbias == 0.0f) return; auto d_gpair = gpair_; dh::LaunchN(device_id_, ridx_end_ - ridx_begin_, [=] __device__(size_t idx) { auto &g = d_gpair[idx * num_groups + group_idx]; g += GradientPair(g.GetHess() * dbias, 0); }); } GradientPair GetGradient(int 
group_idx, int num_group, int fidx) { dh::safe_cuda(cudaSetDevice(device_id_)); common::Span<xgboost::Entry> d_col = data_.subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; common::Span<GradientPair> d_gpair = gpair_; auto counting = thrust::make_counting_iterator(0ull); auto f = [=] __device__(size_t idx) { auto entry = d_col[idx]; auto g = d_gpair[entry.index * num_group + group_idx]; return GradientPair(g.GetGrad() * entry.fvalue, g.GetHess() * entry.fvalue * entry.fvalue); }; // NOLINT thrust::transform_iterator<decltype(f), decltype(counting), GradientPair> multiply_iterator(counting, f); return dh::SumReduction(temp_, multiply_iterator, col_size); } void UpdateResidual(float dw, int group_idx, int num_groups, int fidx) { common::Span<GradientPair> d_gpair = gpair_; common::Span<Entry> d_col = data_.subspan(row_ptr_[fidx]); size_t col_size = row_ptr_[fidx + 1] - row_ptr_[fidx]; dh::LaunchN(device_id_, col_size, [=] __device__(size_t idx) { auto entry = d_col[idx]; auto &g = d_gpair[entry.index * num_groups + group_idx]; g += GradientPair(g.GetHess() * dw * entry.fvalue, 0); }); } }; /** * \class GPUCoordinateUpdater * * \brief Coordinate descent algorithm that updates one feature per iteration */ class GPUCoordinateUpdater : public LinearUpdater { public: // set training parameter void Init( const std::vector<std::pair<std::string, std::string>> &args) override { tparam_.InitAllowUnknown(args); selector_.reset(FeatureSelector::Create(tparam_.feature_selector)); monitor_.Init("GPUCoordinateUpdater"); } void LazyInitShards(DMatrix *p_fmat, const gbm::GBLinearModelParam &model_param) { if (!shards_.empty()) return; dist_ = GPUDistribution::Block(GPUSet::All(tparam_.gpu_id, tparam_.n_gpus, p_fmat->Info().num_row_)); auto devices = dist_.Devices(); size_t n_devices = static_cast<size_t>(devices.Size()); size_t row_begin = 0; size_t num_row = static_cast<size_t>(p_fmat->Info().num_row_); // Partition input matrix into row segments std::vector<size_t> row_segments; row_segments.push_back(0); for (int d_idx = 0; d_idx < n_devices; ++d_idx) { size_t shard_size = dist_.ShardSize(num_row, d_idx); size_t row_end = row_begin + shard_size; row_segments.push_back(row_end); row_begin = row_end; } CHECK(p_fmat->SingleColBlock()); SparsePage const& batch = *(p_fmat->GetColumnBatches().begin()); shards_.resize(n_devices); // Create device shards dh::ExecuteIndexShards(&shards_, [&](int i, std::unique_ptr<DeviceShard>& shard) { shard = std::unique_ptr<DeviceShard>( new DeviceShard(devices.DeviceId(i), batch, row_segments[i], row_segments[i + 1], tparam_, model_param)); }); } void Update(HostDeviceVector<GradientPair> *in_gpair, DMatrix *p_fmat, gbm::GBLinearModel *model, double sum_instance_weight) override { tparam_.DenormalizePenalties(sum_instance_weight); monitor_.Start("LazyInitShards"); this->LazyInitShards(p_fmat, model->param); monitor_.Stop("LazyInitShards"); monitor_.Start("UpdateGpair"); auto &in_gpair_host = in_gpair->ConstHostVector(); // Update gpair dh::ExecuteIndexShards(&shards_, [&](int idx, std::unique_ptr<DeviceShard>& shard) { if (!shard->IsEmpty()) { shard->UpdateGpair(in_gpair_host, model->param); } }); monitor_.Stop("UpdateGpair"); monitor_.Start("UpdateBias"); this->UpdateBias(p_fmat, model); monitor_.Stop("UpdateBias"); // prepare for updating the weights selector_->Setup(*model, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm, coord_param_.top_k); monitor_.Start("UpdateFeature"); for (auto group_idx = 0; 
group_idx < model->param.num_output_group; ++group_idx) { for (auto i = 0U; i < model->param.num_feature; i++) { auto fidx = selector_->NextFeature( i, *model, group_idx, in_gpair->ConstHostVector(), p_fmat, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm); if (fidx < 0) break; this->UpdateFeature(fidx, group_idx, &in_gpair->HostVector(), model); } } monitor_.Stop("UpdateFeature"); } void UpdateBias(DMatrix *p_fmat, gbm::GBLinearModel *model) { for (int group_idx = 0; group_idx < model->param.num_output_group; ++group_idx) { // Get gradient auto grad = dh::ReduceShards<GradientPair>( &shards_, [&](std::unique_ptr<DeviceShard> &shard) { if (!shard->IsEmpty()) { GradientPair result = shard->GetBiasGradient(group_idx, model->param.num_output_group); return result; } return GradientPair(0, 0); }); auto dbias = static_cast<float>( tparam_.learning_rate * CoordinateDeltaBias(grad.GetGrad(), grad.GetHess())); model->bias()[group_idx] += dbias; // Update residual dh::ExecuteIndexShards(&shards_, [&](int idx, std::unique_ptr<DeviceShard>& shard) { if (!shard->IsEmpty()) { shard->UpdateBiasResidual(dbias, group_idx, model->param.num_output_group); } }); } } void UpdateFeature(int fidx, int group_idx, std::vector<GradientPair> *in_gpair, gbm::GBLinearModel *model) { bst_float &w = (*model)[fidx][group_idx]; // Get gradient auto grad = dh::ReduceShards<GradientPair>( &shards_, [&](std::unique_ptr<DeviceShard> &shard) { if (!shard->IsEmpty()) { return shard->GetGradient(group_idx, model->param.num_output_group, fidx); } return GradientPair(0, 0); }); auto dw = static_cast<float>(tparam_.learning_rate * CoordinateDelta(grad.GetGrad(), grad.GetHess(), w, tparam_.reg_alpha_denorm, tparam_.reg_lambda_denorm)); w += dw; dh::ExecuteIndexShards(&shards_, [&](int idx, std::unique_ptr<DeviceShard> &shard) { if (!shard->IsEmpty()) { shard->UpdateResidual(dw, group_idx, model->param.num_output_group, fidx); } }); } private: // training parameter LinearTrainParam tparam_; CoordinateParam coord_param_; GPUDistribution dist_; std::unique_ptr<FeatureSelector> selector_; common::Monitor monitor_; std::vector<std::unique_ptr<DeviceShard>> shards_; }; XGBOOST_REGISTER_LINEAR_UPDATER(GPUCoordinateUpdater, "gpu_coord_descent") .describe( "Update linear model according to coordinate descent algorithm. GPU " "accelerated.") .set_body([]() { return new GPUCoordinateUpdater(); }); } // namespace linear } // namespace xgboost
d0fc13ca15b88e17a9d08e014ddec94e47697703.hip
// !!! This is a file automatically generated by hipify!!! /* Copyright 2021. Uecker Lab. University Medical Center Göttingen. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: Moritz Blumenthal */ #include <cstdint> #include <stdint.h> #include <stdio.h> #include <stdbool.h> #include <assert.h> #include <hip/hip_runtime_api.h> #include <hip/hip_runtime.h> #include <hip/hip_complex.h> #include "misc/misc.h" #include "num/gpu_conv.h" #include "num/multind.h" // limited by hardware to 1024 on most devices // should be a multiple of 32 (warp size) #define BLOCKSIZE 1024 static void getBlockSize3_internal(int block[3], const long dims[3], const void* func) { hipFuncAttributes attr; hipFuncGetAttributes(&attr, func); int threads = attr.maxThreadsPerBlock; block[0] = 1; block[1] = 1; block[2] = 1; while ((threads >= 2) && (block[0] < dims[0])) { block[0] *= 2; threads /= 2; } while ((threads >= 2) && (block[1] < dims[1])) { block[1] *= 2; threads /= 2; } while ((threads >= 2) && (block[2] < dims[2])) { block[2] *= 2; threads /= 2; } } static dim3 getBlockSize3(const long dims[3], const void* func) { int block[3]; getBlockSize3_internal(block, dims, func); return dim3(block[0], block[1], block[2]); } static long gridsize_int(long N, int blocksize) { return (N + blocksize - 1) / blocksize; } static dim3 getGridSize3(const long dims[3], const void* func) { int block[3]; getBlockSize3_internal(block, dims, func); return dim3(gridsize_int(dims[0], block[0]), gridsize_int(dims[1], block[1]), gridsize_int(dims[2], block[2])); } static dim3 blocksize(int N, const void* func) { const long dims[3] = { N, 1, 1}; return getBlockSize3(dims, func); } static dim3 gridsize(long N, const void* func) { const long dims[3] = { N, 1, 1}; return getGridSize3(dims, func); } template <int DIMS, typename T> struct im2col_descriptor { T NC; // number channels T istrs_NC; // 1 T ostrs_NC; // 1 T odims[DIMS]; // dimensions of the convolution (not including channel) T kdims[DIMS]; T idims[DIMS]; T istrs_odims[DIMS]; // input strides of im2col (in elements) T istrs_kdims[DIMS]; T ostrs_kdims[DIMS]; // output strides of im2col (in elements) T ostrs_odims[DIMS]; T N_in_elements; // channels * in-dims T N_out_elements; // channels * out-dims * krn-dims T N_out_elements_o_only; // channels * out-dims T N_out_elements_k_only; // channels * krn-dims bool triv_strides_dilation; // trivial dilation and strides }; template <int DIMS, typename T> static struct im2col_descriptor<DIMS, T>get_im2col_descriptor(const long odims[5], const long idims[5], const long kdims[5], const long dilation[5], const long strides[5]) { struct im2col_descriptor<DIMS, T>config; config.NC = idims[1]; config.istrs_NC = 1; config.ostrs_NC = 1; config.N_in_elements = idims[1]; config.N_out_elements = idims[1]; config.N_out_elements_o_only = idims[1]; config.N_out_elements_k_only = idims[1]; config.triv_strides_dilation = true; long istrs[5]; md_calc_strides(5, istrs, idims, 1); for (int i = 0; i < DIMS; i++) { config.odims[i] = 1; config.kdims[i] = 1; config.idims[i] = 1; config.istrs_odims[i] = 0; config.istrs_kdims[i] = 0; config.ostrs_odims[i] = 0; config.ostrs_kdims[i] = 0; } for (int i = 2, j = 0; i < 5; i++) { if (!((1 < odims[i]) || (1 < kdims[i]))) continue; assert(j < DIMS); config.odims[j] = odims[i]; config.kdims[j] = kdims[i]; config.idims[j] = idims[i]; config.istrs_odims[j] = istrs[i] * (NULL == strides ? 
1 : strides[i]); config.istrs_kdims[j] = istrs[i] * (NULL == dilation ? 1 : dilation[i]); config.N_in_elements *= idims[i]; config.N_out_elements_o_only *= odims[i]; config.N_out_elements_k_only *= kdims[i]; config.N_out_elements *= odims[i] * kdims[i]; config.triv_strides_dilation &= ( (config.istrs_odims[j] == istrs[i]) && (config.istrs_kdims[j] == istrs[i])); j++; } config.ostrs_odims[0] = config.N_out_elements_k_only; config.ostrs_kdims[0] = config.NC; for (int i = 1; i < DIMS; i++) { config.ostrs_odims[i] = config.ostrs_odims[i - 1] * config.odims[i - 1]; config.ostrs_kdims[i] = config.ostrs_kdims[i - 1] * config.kdims[i - 1]; } return config; } // loop over out-dims and krn-dims and copy elements from input (copies one element per thread) template <int DIMS, typename T, bool transp> __global__ static void kern_im2col_valid(struct im2col_descriptor<DIMS, T> config, cuFloatComplex* dst, const cuFloatComplex* src) { int start = threadIdx.x + blockDim.x * blockIdx.x; int stride = blockDim.x * gridDim.x; for (T i = start; i < config.N_out_elements; i += stride) { T i_cur = i; T i_new = i; T in_index = 0; if (1 < config.NC) { i_new = i_cur / config.NC; in_index = (i_cur - config.NC * i_new) * config.istrs_NC; i_cur = i_new; } for (int j = 0; j < DIMS; j++) { i_new = i_cur / config.kdims[j]; in_index += config.istrs_kdims[j] * (i_cur - config.kdims[j] * i_new); i_cur = i_new; } for (int j = 0; j < DIMS - 1; j++) { i_new = i_cur / config.odims[j]; in_index += config.istrs_odims[j] * (i_cur - config.odims[j] * i_new); i_cur = i_new; } in_index += i_cur * config.istrs_odims[DIMS - 1]; if (transp) { atomicAdd(&(dst[in_index].x), src[i].x); atomicAdd(&(dst[in_index].y), src[i].y); } else { dst[i] = src[in_index]; } } } // loop over in-dims and copy elements from input to all corresponding output position template <int DIMS, typename T, bool transp> __global__ static void kern_im2col_valid_no_dil_str(struct im2col_descriptor<DIMS, T> config, cuFloatComplex* dst, const cuFloatComplex* src) { int start = threadIdx.x + blockDim.x * blockIdx.x; int stride = blockDim.x * gridDim.x; for (T i = start; i < config.N_in_elements; i += stride) { T i_cur = i; T i_new = i_cur / config.NC; T c = i_cur - i_new * config.NC; T idx_i[3] = { 0, 0, 0 }; T idx_k[3] = { 0, 0, 0 }; for (int j = 0; j < DIMS - 1; j++) { i_cur = i_new; i_new = i_cur / config.idims[j]; idx_i[j] = i_cur - i_new * config.idims[j]; } idx_i[DIMS - 1] = i_new; cuFloatComplex tmp = transp ? dst[i] : src[i]; T kdims[3]; for (int j = 0; j < 3; j++) kdims[j] = (j < DIMS) ? 
config.kdims[j] : 1; for (idx_k[2] = 0; idx_k[2] < kdims[2]; idx_k[2]++) for (idx_k[1] = 0; idx_k[1] < kdims[1]; idx_k[1]++) for (idx_k[0] = 0; idx_k[0] < kdims[0]; idx_k[0]++) { bool copy = true; T o_stride = config.N_out_elements_k_only; T k_stride = config.NC; T index = c; for (int j = 0; j < DIMS; j++) { copy = copy && (idx_k[j] <= idx_i[j]) && (idx_i[j] < idx_k[j] + config.odims[j]); index += (idx_i[j] - idx_k[j]) * o_stride + idx_k[j] * k_stride; o_stride *= config.odims[j]; k_stride *= config.kdims[j]; } if (copy) { if (transp) tmp = cuCaddf(tmp, src[index]); else dst[index] = tmp; } } if (transp) dst[i] = tmp; } } template <int DIMS, typename T, bool transp> static void cuda_im2col_int(_Complex float* dst, const _Complex float* src, const long odims[5], const long idims[5], const long kdims[5], const long dilation[5], const long strides[5]) { struct im2col_descriptor<DIMS, T> config = get_im2col_descriptor<DIMS, T>(odims, idims, kdims, dilation, strides); bool func1 = true; #ifdef NON_DETERMINISTIC bool func2 = true; #else bool func2 = !transp; #endif func1 = func1 && config.triv_strides_dilation; func1 = func1 && (!func2 || (1 < config.NC)); if (func1) { const void* func = (const void*)kern_im2col_valid_no_dil_str<DIMS, T, transp>; hipLaunchKernelGGL(( kern_im2col_valid_no_dil_str<DIMS, T, transp>), dim3(gridsize(config.N_in_elements, func)), dim3(blocksize(config.N_in_elements, func)), 0, 0, config, (cuFloatComplex*) dst, (cuFloatComplex*) src); return; } if (func2) { const void* func = (const void*)kern_im2col_valid<DIMS, T, transp>; hipLaunchKernelGGL(( kern_im2col_valid<DIMS, T, transp>), dim3(gridsize(config.N_in_elements, func)), dim3(blocksize(config.N_in_elements, func)), 0, 0, config, (cuFloatComplex*) dst, (cuFloatComplex*) src); return; } assert(0); } template <bool transp> static void cuda_im2col_int2(_Complex float* dst, const _Complex float* src, const long odims[5], const long idims[5], const long kdims[5], const long dilation[5], const long strides[5]) { long Nout = idims[1] * md_calc_size(3, kdims + 2) * md_calc_size(3, odims + 2); int DIMS = bitcount(md_nontriv_dims(3, kdims + 2) | md_nontriv_dims(3, odims + 2)); for (int i = 0 ; i < 3; i++) if (1 == odims[DIMS + 1] * kdims[DIMS + 1] * idims[DIMS + 1] * (NULL != dilation ? dilation[DIMS + 1] : 1) * (NULL != strides ? 
strides[DIMS + 1] : 1)) DIMS --; DIMS = 3; switch (DIMS) { case 1: if (INT32_MAX / 2 > Nout) cuda_im2col_int<1, uint32_t, transp>(dst, src, odims, idims, kdims, dilation, strides); else cuda_im2col_int<1, uint64_t, transp>(dst, src, odims, idims, kdims, dilation, strides); break; case 2: if (INT32_MAX / 2 > Nout) cuda_im2col_int<2, uint32_t, transp>(dst, src, odims, idims, kdims, dilation, strides); else cuda_im2col_int<2, uint64_t, transp>(dst, src, odims, idims, kdims, dilation, strides); break; case 3: if (INT32_MAX / 2 > Nout) cuda_im2col_int<3, uint32_t, transp>(dst, src, odims, idims, kdims, dilation, strides); else cuda_im2col_int<3, uint64_t, transp>(dst, src, odims, idims, kdims, dilation, strides); break; default: assert(0); } } /* * * Optimized kernel for copying im2col (complex float only) * * @args dst * @args src * @args odims [OC, 1, OX, OY, OZ] * @args idims [ 1, IC, IX, IY, IZ] * @args kdims [OC, IC, KX, KY, KZ] * @args dilation [ 1, 1, DX, DY, DZ] or NULL * @args strides [ 1, 1, SX, SY, SZ] or NULL * * Copy: * dims: [IC, KX, KY, KZ, OX, OY, OZ] * ostrs: trivial strides of dims * istrs: [ISC, ISX * DX, ISY * DY, ISZ * DZ, ISX * SX, ISY * SY, ISZ * SZ] * where IS* are trivial strides of idims * */ extern "C" void cuda_im2col(_Complex float* dst, const _Complex float* src, const long odims[5], const long idims[5], const long kdims[5], const long dilation[5], const long strides[5]) { cuda_im2col_int2<false>(dst, src, odims, idims, kdims, dilation, strides); } /* * * Transposed/adjoint of cuda im2col * * @args dst * @args src * @args odims [OC, 1, OX, OY, OZ] * @args idims [ 1, IC, IX, IY, IZ] * @args kdims [OC, IC, KX, KY, KZ] * @args dilation [ 1, 1, DX, DY, DZ] or NULL * @args strides [ 1, 1, SX, SY, SZ] or NULL * * zadd with strides: * dims: [IC, KX, KY, KZ, OX, OY, OZ] * ostrs: [ISC, ISX * DX, ISY * DY, ISZ * DZ, ISX * SX, ISY * SY, ISZ * SZ] * istrs: trivial strides of dims * where IS* are trivial strides of idims * */ extern "C" void cuda_im2col_transp(_Complex float* dst, const _Complex float* src, const long odims[5], const long idims[5], const long kdims[5], const long dilation[5], const long strides[5]) { cuda_im2col_int2<true>(dst, src, odims, idims, kdims, dilation, strides); }
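kern_im2col_valid above converts each flat output index into (channel, kernel..., output...) digits by repeated division and accumulates the matching input offset from the strides stored in the descriptor. The host-side sketch below restates that mixed-radix arithmetic for arbitrary DIMS; the function and parameter names are illustrative, not from this file, and it uses modulo where the kernel subtracts the divided value (equivalent for in-range indices).

// Illustrative CPU restatement of the index arithmetic in kern_im2col_valid:
// channel is the fastest-varying digit (input stride 1), then the kernel-window
// digits (strides scaled by dilation), then the output-position digits
// (strides scaled by the convolution stride).
#include <cstdint>
#include <vector>

int64_t flat_to_input_offset(int64_t i, int64_t NC,
                             const std::vector<int64_t>& kdims,
                             const std::vector<int64_t>& odims,
                             const std::vector<int64_t>& istrs_kdims,
                             const std::vector<int64_t>& istrs_odims)
{
	int64_t off = i % NC;	// istrs_NC == 1
	i /= NC;
	for (size_t d = 0; d < kdims.size(); ++d) {
		off += istrs_kdims[d] * (i % kdims[d]);
		i /= kdims[d];
	}
	for (size_t d = 0; d < odims.size(); ++d) {
		off += istrs_odims[d] * (i % odims[d]);
		i /= odims[d];
	}
	return off;
}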
d0fc13ca15b88e17a9d08e014ddec94e47697703.cu
/* Copyright 2021. Uecker Lab. University Medical Center Göttingen. * All rights reserved. Use of this source code is governed by * a BSD-style license which can be found in the LICENSE file. * * Authors: Moritz Blumenthal */ #include <cstdint> #include <stdint.h> #include <stdio.h> #include <stdbool.h> #include <assert.h> #include <cuda_runtime_api.h> #include <cuda.h> #include <cuComplex.h> #include "misc/misc.h" #include "num/gpu_conv.h" #include "num/multind.h" // limited by hardware to 1024 on most devices // should be a multiple of 32 (warp size) #define BLOCKSIZE 1024 static void getBlockSize3_internal(int block[3], const long dims[3], const void* func) { cudaFuncAttributes attr; cudaFuncGetAttributes(&attr, func); int threads = attr.maxThreadsPerBlock; block[0] = 1; block[1] = 1; block[2] = 1; while ((threads >= 2) && (block[0] < dims[0])) { block[0] *= 2; threads /= 2; } while ((threads >= 2) && (block[1] < dims[1])) { block[1] *= 2; threads /= 2; } while ((threads >= 2) && (block[2] < dims[2])) { block[2] *= 2; threads /= 2; } } static dim3 getBlockSize3(const long dims[3], const void* func) { int block[3]; getBlockSize3_internal(block, dims, func); return dim3(block[0], block[1], block[2]); } static long gridsize_int(long N, int blocksize) { return (N + blocksize - 1) / blocksize; } static dim3 getGridSize3(const long dims[3], const void* func) { int block[3]; getBlockSize3_internal(block, dims, func); return dim3(gridsize_int(dims[0], block[0]), gridsize_int(dims[1], block[1]), gridsize_int(dims[2], block[2])); } static dim3 blocksize(int N, const void* func) { const long dims[3] = { N, 1, 1}; return getBlockSize3(dims, func); } static dim3 gridsize(long N, const void* func) { const long dims[3] = { N, 1, 1}; return getGridSize3(dims, func); } template <int DIMS, typename T> struct im2col_descriptor { T NC; // number channels T istrs_NC; // 1 T ostrs_NC; // 1 T odims[DIMS]; // dimensions of the convolution (not including channel) T kdims[DIMS]; T idims[DIMS]; T istrs_odims[DIMS]; // input strides of im2col (in elements) T istrs_kdims[DIMS]; T ostrs_kdims[DIMS]; // output strides of im2col (in elements) T ostrs_odims[DIMS]; T N_in_elements; // channels * in-dims T N_out_elements; // channels * out-dims * krn-dims T N_out_elements_o_only; // channels * out-dims T N_out_elements_k_only; // channels * krn-dims bool triv_strides_dilation; // trivial dilation and strides }; template <int DIMS, typename T> static struct im2col_descriptor<DIMS, T>get_im2col_descriptor(const long odims[5], const long idims[5], const long kdims[5], const long dilation[5], const long strides[5]) { struct im2col_descriptor<DIMS, T>config; config.NC = idims[1]; config.istrs_NC = 1; config.ostrs_NC = 1; config.N_in_elements = idims[1]; config.N_out_elements = idims[1]; config.N_out_elements_o_only = idims[1]; config.N_out_elements_k_only = idims[1]; config.triv_strides_dilation = true; long istrs[5]; md_calc_strides(5, istrs, idims, 1); for (int i = 0; i < DIMS; i++) { config.odims[i] = 1; config.kdims[i] = 1; config.idims[i] = 1; config.istrs_odims[i] = 0; config.istrs_kdims[i] = 0; config.ostrs_odims[i] = 0; config.ostrs_kdims[i] = 0; } for (int i = 2, j = 0; i < 5; i++) { if (!((1 < odims[i]) || (1 < kdims[i]))) continue; assert(j < DIMS); config.odims[j] = odims[i]; config.kdims[j] = kdims[i]; config.idims[j] = idims[i]; config.istrs_odims[j] = istrs[i] * (NULL == strides ? 1 : strides[i]); config.istrs_kdims[j] = istrs[i] * (NULL == dilation ? 
1 : dilation[i]); config.N_in_elements *= idims[i]; config.N_out_elements_o_only *= odims[i]; config.N_out_elements_k_only *= kdims[i]; config.N_out_elements *= odims[i] * kdims[i]; config.triv_strides_dilation &= ( (config.istrs_odims[j] == istrs[i]) && (config.istrs_kdims[j] == istrs[i])); j++; } config.ostrs_odims[0] = config.N_out_elements_k_only; config.ostrs_kdims[0] = config.NC; for (int i = 1; i < DIMS; i++) { config.ostrs_odims[i] = config.ostrs_odims[i - 1] * config.odims[i - 1]; config.ostrs_kdims[i] = config.ostrs_kdims[i - 1] * config.kdims[i - 1]; } return config; } // loop over out-dims and krn-dims and copy elements from input (copies one element per thread) template <int DIMS, typename T, bool transp> __global__ static void kern_im2col_valid(struct im2col_descriptor<DIMS, T> config, cuFloatComplex* dst, const cuFloatComplex* src) { int start = threadIdx.x + blockDim.x * blockIdx.x; int stride = blockDim.x * gridDim.x; for (T i = start; i < config.N_out_elements; i += stride) { T i_cur = i; T i_new = i; T in_index = 0; if (1 < config.NC) { i_new = i_cur / config.NC; in_index = (i_cur - config.NC * i_new) * config.istrs_NC; i_cur = i_new; } for (int j = 0; j < DIMS; j++) { i_new = i_cur / config.kdims[j]; in_index += config.istrs_kdims[j] * (i_cur - config.kdims[j] * i_new); i_cur = i_new; } for (int j = 0; j < DIMS - 1; j++) { i_new = i_cur / config.odims[j]; in_index += config.istrs_odims[j] * (i_cur - config.odims[j] * i_new); i_cur = i_new; } in_index += i_cur * config.istrs_odims[DIMS - 1]; if (transp) { atomicAdd(&(dst[in_index].x), src[i].x); atomicAdd(&(dst[in_index].y), src[i].y); } else { dst[i] = src[in_index]; } } } // loop over in-dims and copy elements from input to all corresponding output position template <int DIMS, typename T, bool transp> __global__ static void kern_im2col_valid_no_dil_str(struct im2col_descriptor<DIMS, T> config, cuFloatComplex* dst, const cuFloatComplex* src) { int start = threadIdx.x + blockDim.x * blockIdx.x; int stride = blockDim.x * gridDim.x; for (T i = start; i < config.N_in_elements; i += stride) { T i_cur = i; T i_new = i_cur / config.NC; T c = i_cur - i_new * config.NC; T idx_i[3] = { 0, 0, 0 }; T idx_k[3] = { 0, 0, 0 }; for (int j = 0; j < DIMS - 1; j++) { i_cur = i_new; i_new = i_cur / config.idims[j]; idx_i[j] = i_cur - i_new * config.idims[j]; } idx_i[DIMS - 1] = i_new; cuFloatComplex tmp = transp ? dst[i] : src[i]; T kdims[3]; for (int j = 0; j < 3; j++) kdims[j] = (j < DIMS) ? 
config.kdims[j] : 1; for (idx_k[2] = 0; idx_k[2] < kdims[2]; idx_k[2]++) for (idx_k[1] = 0; idx_k[1] < kdims[1]; idx_k[1]++) for (idx_k[0] = 0; idx_k[0] < kdims[0]; idx_k[0]++) { bool copy = true; T o_stride = config.N_out_elements_k_only; T k_stride = config.NC; T index = c; for (int j = 0; j < DIMS; j++) { copy = copy && (idx_k[j] <= idx_i[j]) && (idx_i[j] < idx_k[j] + config.odims[j]); index += (idx_i[j] - idx_k[j]) * o_stride + idx_k[j] * k_stride; o_stride *= config.odims[j]; k_stride *= config.kdims[j]; } if (copy) { if (transp) tmp = cuCaddf(tmp, src[index]); else dst[index] = tmp; } } if (transp) dst[i] = tmp; } } template <int DIMS, typename T, bool transp> static void cuda_im2col_int(_Complex float* dst, const _Complex float* src, const long odims[5], const long idims[5], const long kdims[5], const long dilation[5], const long strides[5]) { struct im2col_descriptor<DIMS, T> config = get_im2col_descriptor<DIMS, T>(odims, idims, kdims, dilation, strides); bool func1 = true; #ifdef NON_DETERMINISTIC bool func2 = true; #else bool func2 = !transp; #endif func1 = func1 && config.triv_strides_dilation; func1 = func1 && (!func2 || (1 < config.NC)); if (func1) { const void* func = (const void*)kern_im2col_valid_no_dil_str<DIMS, T, transp>; kern_im2col_valid_no_dil_str<DIMS, T, transp><<<gridsize(config.N_in_elements, func), blocksize(config.N_in_elements, func)>>>(config, (cuFloatComplex*) dst, (cuFloatComplex*) src); return; } if (func2) { const void* func = (const void*)kern_im2col_valid<DIMS, T, transp>; kern_im2col_valid<DIMS, T, transp><<<gridsize(config.N_in_elements, func), blocksize(config.N_in_elements, func)>>>(config, (cuFloatComplex*) dst, (cuFloatComplex*) src); return; } assert(0); } template <bool transp> static void cuda_im2col_int2(_Complex float* dst, const _Complex float* src, const long odims[5], const long idims[5], const long kdims[5], const long dilation[5], const long strides[5]) { long Nout = idims[1] * md_calc_size(3, kdims + 2) * md_calc_size(3, odims + 2); int DIMS = bitcount(md_nontriv_dims(3, kdims + 2) | md_nontriv_dims(3, odims + 2)); for (int i = 0 ; i < 3; i++) if (1 == odims[DIMS + 1] * kdims[DIMS + 1] * idims[DIMS + 1] * (NULL != dilation ? dilation[DIMS + 1] : 1) * (NULL != strides ? 
strides[DIMS + 1] : 1)) DIMS --; DIMS = 3; switch (DIMS) { case 1: if (INT32_MAX / 2 > Nout) cuda_im2col_int<1, uint32_t, transp>(dst, src, odims, idims, kdims, dilation, strides); else cuda_im2col_int<1, uint64_t, transp>(dst, src, odims, idims, kdims, dilation, strides); break; case 2: if (INT32_MAX / 2 > Nout) cuda_im2col_int<2, uint32_t, transp>(dst, src, odims, idims, kdims, dilation, strides); else cuda_im2col_int<2, uint64_t, transp>(dst, src, odims, idims, kdims, dilation, strides); break; case 3: if (INT32_MAX / 2 > Nout) cuda_im2col_int<3, uint32_t, transp>(dst, src, odims, idims, kdims, dilation, strides); else cuda_im2col_int<3, uint64_t, transp>(dst, src, odims, idims, kdims, dilation, strides); break; default: assert(0); } } /* * * Optimized kernel for copying im2col (complex float only) * * @args dst * @args src * @args odims [OC, 1, OX, OY, OZ] * @args idims [ 1, IC, IX, IY, IZ] * @args kdims [OC, IC, KX, KY, KZ] * @args dilation [ 1, 1, DX, DY, DZ] or NULL * @args strides [ 1, 1, SX, SY, SZ] or NULL * * Copy: * dims: [IC, KX, KY, KZ, OX, OY, OZ] * ostrs: trivial strides of dims * istrs: [ISC, ISX * DX, ISY * DY, ISZ * DZ, ISX * SX, ISY * SY, ISZ * SZ] * where IS* are trivial strides of idims * */ extern "C" void cuda_im2col(_Complex float* dst, const _Complex float* src, const long odims[5], const long idims[5], const long kdims[5], const long dilation[5], const long strides[5]) { cuda_im2col_int2<false>(dst, src, odims, idims, kdims, dilation, strides); } /* * * Transposed/adjoint of cuda im2col * * @args dst * @args src * @args odims [OC, 1, OX, OY, OZ] * @args idims [ 1, IC, IX, IY, IZ] * @args kdims [OC, IC, KX, KY, KZ] * @args dilation [ 1, 1, DX, DY, DZ] or NULL * @args strides [ 1, 1, SX, SY, SZ] or NULL * * zadd with strides: * dims: [IC, KX, KY, KZ, OX, OY, OZ] * ostrs: [ISC, ISX * DX, ISY * DY, ISZ * DZ, ISX * SX, ISY * SY, ISZ * SZ] * istrs: trivial strides of dims * where IS* are trivial strides of idims * */ extern "C" void cuda_im2col_transp(_Complex float* dst, const _Complex float* src, const long odims[5], const long idims[5], const long kdims[5], const long dilation[5], const long strides[5]) { cuda_im2col_int2<true>(dst, src, odims, idims, kdims, dilation, strides); }
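The comment block above describes cuda_im2col as a strided copy over dims [IC, KX, KY, KZ, OX, OY, OZ]. As a concrete 1-D instance, here is a hedged CPU reference; it assumes a valid convolution (every ox*SX + kx*DX falls inside the input) and all names are ours, not from the file.

// Hypothetical CPU reference for the 1-D case of cuda_im2col: dst laid out as
// [OX][KX][IC] (channel fastest), src as [IX][IC]; SX is the convolution
// stride and DX the dilation, matching the istrs scaling set up in
// get_im2col_descriptor.
#include <complex>
#include <vector>

void im2col_1d_reference(std::vector<std::complex<float>>& dst,
                         const std::vector<std::complex<float>>& src,
                         long IC, long KX, long OX, long DX, long SX)
{
	for (long ox = 0; ox < OX; ++ox)
		for (long kx = 0; kx < KX; ++kx)
			for (long c = 0; c < IC; ++c) {
				long ix = ox * SX + kx * DX;	// input position read by this (ox, kx)
				dst[(ox * KX + kx) * IC + c] = src[ix * IC + c];
			}
}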
e3a040bf7ab39afecf76777622b587e55d6aa44b.hip
// !!! This is a file automatically generated by hipify!!! // Array multiplication: C = A * B: // System includes #include <stdio.h> #include <assert.h> #include <malloc.h> #include <math.h> #include <stdlib.h> // CUDA runtime #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA #include "helper_functions.h" #include "helper_cuda.h" #ifndef BLOCKSIZE #define BLOCKSIZE 32 // number of threads per block #endif #ifndef SIZE #define SIZE 1*1024*1024 // array size #endif #ifndef NUMTRIALS #define NUMTRIALS 100 // to make the timing more accurate #endif #ifndef TOLERANCE #define TOLERANCE 0.00001f // tolerance to relative error #endif // array multiplication (CUDA Kernel) on the device: C = A * B __global__ void ArrayMul( float *A, float *B, float *C ) { int gid = blockIdx.x*blockDim.x + threadIdx.x; C[gid] = A[gid] * B[gid]; } // main program: int main( int argc, char* argv[ ] ) { int dev = findCudaDevice(argc, (const char **)argv); // allocate host memory: float * hA = new float [ SIZE ]; float * hB = new float [ SIZE ]; float * hC = new float [ SIZE ]; for( int i = 0; i < SIZE; i++ ) { hA[i] = hB[i] = (float) sqrt( (float)i ); } // allocate device memory: float *dA, *dB, *dC; dim3 dimsA( SIZE, 1, 1 ); dim3 dimsB( SIZE, 1, 1 ); dim3 dimsC( SIZE, 1, 1 ); hipError_t status; status = hipMalloc( reinterpret_cast<void **>(&dA), SIZE*sizeof(float) ); checkCudaErrors( status ); status = hipMalloc( reinterpret_cast<void **>(&dB), SIZE*sizeof(float) ); checkCudaErrors( status ); status = hipMalloc( reinterpret_cast<void **>(&dC), SIZE*sizeof(float) ); checkCudaErrors( status ); // copy host memory to the device: status = hipMemcpy( dA, hA, SIZE*sizeof(float), hipMemcpyHostToDevice ); checkCudaErrors( status ); status = hipMemcpy( dB, hB, SIZE*sizeof(float), hipMemcpyHostToDevice ); checkCudaErrors( status ); // setup the execution parameters: dim3 threads(BLOCKSIZE, 1, 1 ); dim3 grid( SIZE / threads.x, 1, 1 ); // Create and start timer hipDeviceSynchronize( ); // allocate CUDA events that we'll use for timing: hipEvent_t start, stop; status = hipEventCreate( &start ); checkCudaErrors( status ); status = hipEventCreate( &stop ); checkCudaErrors( status ); // record the start event: status = hipEventRecord( start, NULL ); checkCudaErrors( status ); // execute the kernel: for( int t = 0; t < NUMTRIALS; t++) { hipLaunchKernelGGL(( ArrayMul), dim3(grid), dim3(threads) , 0, 0, dA, dB, dC ); } // record the stop event: status = hipEventRecord( stop, NULL ); checkCudaErrors( status ); // wait for the stop event to complete: status = hipEventSynchronize( stop ); checkCudaErrors( status ); float msecTotal = 0.0f; status = hipEventElapsedTime( &msecTotal, start, stop ); checkCudaErrors( status ); // compute and print the performance double secondsTotal = 0.001 * (double)msecTotal; double multsPerSecond = (float)SIZE * (float)NUMTRIALS / secondsTotal; double megaMultsPerSecond = multsPerSecond / 1000000.; fprintf( stderr, "Size = %10d, MegaMults/Second = %10.2lf\n", SIZE, megaMultsPerSecond ); // copy result from the device to the host: status = hipMemcpy( hC, dC, SIZE*sizeof(float), hipMemcpyDeviceToHost ); checkCudaErrors( status ); // check for correctness: fprintf( stderr, "Checking computed result for correctness:\n"); bool correct = true; for(int i = 1; i < SIZE; i++ ) { double error = ( (double)hC[i] - (double)i ) / (double)i; if( fabs(error) > TOLERANCE ) { fprintf( stderr, "C[%10d] = %10.2lf, correct = %10.2lf\n", i, (double)hC[i], (double)i ); correct = false; } } fprintf( 
stderr, "\n%s.\n", correct ? "PASS" : "FAIL" );

	// clean up memory:

	delete [ ] hA;
	delete [ ] hB;
	delete [ ] hC;

	status = hipFree( dA );
	checkCudaErrors( status );
	status = hipFree( dB );
	checkCudaErrors( status );
	status = hipFree( dC );
	checkCudaErrors( status );

	return 0;
}
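ArrayMul in both versions of this file omits a bounds check, which is safe only because SIZE is an exact multiple of BLOCKSIZE and the grid is sized as SIZE / threads.x. Below is a hedged variant for array lengths that do not divide evenly, with ceiling-division grid sizing; ArrayMulGuarded is an illustrative name, not part of the file, and the hipified launch would go through hipLaunchKernelGGL as above.

// Illustrative only: guarded form of the kernel, letting surplus threads in the
// last block idle instead of writing out of bounds.
__global__ void ArrayMulGuarded( const float *A, const float *B, float *C, int n )
{
	int gid = blockIdx.x*blockDim.x + threadIdx.x;
	if( gid < n )
		C[gid] = A[gid] * B[gid];
}

// host-side launch sketch (CUDA syntax; hipify would rewrite the chevrons):
//	dim3 threads( BLOCKSIZE, 1, 1 );
//	dim3 grid( ( SIZE + BLOCKSIZE - 1 ) / BLOCKSIZE, 1, 1 );
//	ArrayMulGuarded<<< grid, threads >>>( dA, dB, dC, SIZE );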
e3a040bf7ab39afecf76777622b587e55d6aa44b.cu
// Array multiplication: C = A * B: // System includes #include <stdio.h> #include <assert.h> #include <malloc.h> #include <math.h> #include <stdlib.h> // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA #include "helper_functions.h" #include "helper_cuda.h" #ifndef BLOCKSIZE #define BLOCKSIZE 32 // number of threads per block #endif #ifndef SIZE #define SIZE 1*1024*1024 // array size #endif #ifndef NUMTRIALS #define NUMTRIALS 100 // to make the timing more accurate #endif #ifndef TOLERANCE #define TOLERANCE 0.00001f // tolerance to relative error #endif // array multiplication (CUDA Kernel) on the device: C = A * B __global__ void ArrayMul( float *A, float *B, float *C ) { int gid = blockIdx.x*blockDim.x + threadIdx.x; C[gid] = A[gid] * B[gid]; } // main program: int main( int argc, char* argv[ ] ) { int dev = findCudaDevice(argc, (const char **)argv); // allocate host memory: float * hA = new float [ SIZE ]; float * hB = new float [ SIZE ]; float * hC = new float [ SIZE ]; for( int i = 0; i < SIZE; i++ ) { hA[i] = hB[i] = (float) sqrt( (float)i ); } // allocate device memory: float *dA, *dB, *dC; dim3 dimsA( SIZE, 1, 1 ); dim3 dimsB( SIZE, 1, 1 ); dim3 dimsC( SIZE, 1, 1 ); cudaError_t status; status = cudaMalloc( reinterpret_cast<void **>(&dA), SIZE*sizeof(float) ); checkCudaErrors( status ); status = cudaMalloc( reinterpret_cast<void **>(&dB), SIZE*sizeof(float) ); checkCudaErrors( status ); status = cudaMalloc( reinterpret_cast<void **>(&dC), SIZE*sizeof(float) ); checkCudaErrors( status ); // copy host memory to the device: status = cudaMemcpy( dA, hA, SIZE*sizeof(float), cudaMemcpyHostToDevice ); checkCudaErrors( status ); status = cudaMemcpy( dB, hB, SIZE*sizeof(float), cudaMemcpyHostToDevice ); checkCudaErrors( status ); // setup the execution parameters: dim3 threads(BLOCKSIZE, 1, 1 ); dim3 grid( SIZE / threads.x, 1, 1 ); // Create and start timer cudaDeviceSynchronize( ); // allocate CUDA events that we'll use for timing: cudaEvent_t start, stop; status = cudaEventCreate( &start ); checkCudaErrors( status ); status = cudaEventCreate( &stop ); checkCudaErrors( status ); // record the start event: status = cudaEventRecord( start, NULL ); checkCudaErrors( status ); // execute the kernel: for( int t = 0; t < NUMTRIALS; t++) { ArrayMul<<< grid, threads >>>( dA, dB, dC ); } // record the stop event: status = cudaEventRecord( stop, NULL ); checkCudaErrors( status ); // wait for the stop event to complete: status = cudaEventSynchronize( stop ); checkCudaErrors( status ); float msecTotal = 0.0f; status = cudaEventElapsedTime( &msecTotal, start, stop ); checkCudaErrors( status ); // compute and print the performance double secondsTotal = 0.001 * (double)msecTotal; double multsPerSecond = (float)SIZE * (float)NUMTRIALS / secondsTotal; double megaMultsPerSecond = multsPerSecond / 1000000.; fprintf( stderr, "Size = %10d, MegaMults/Second = %10.2lf\n", SIZE, megaMultsPerSecond ); // copy result from the device to the host: status = cudaMemcpy( hC, dC, SIZE*sizeof(float), cudaMemcpyDeviceToHost ); checkCudaErrors( status ); // check for correctness: fprintf( stderr, "Checking computed result for correctness:\n"); bool correct = true; for(int i = 1; i < SIZE; i++ ) { double error = ( (double)hC[i] - (double)i ) / (double)i; if( fabs(error) > TOLERANCE ) { fprintf( stderr, "C[%10d] = %10.2lf, correct = %10.2lf\n", i, (double)hC[i], (double)i ); correct = false; } } fprintf( stderr, "\n%s.\n", correct ? 
"PASS" : "FAIL" ); // clean up memory: delete [ ] hA; delete [ ] hB; delete [ ] hC; status = cudaFree( dA ); checkCudaErrors( status ); status = cudaFree( dB ); checkCudaErrors( status ); status = cudaFree( dC ); checkCudaErrors( status ); return 0; }
ddb24e8bd449af0e0ceeb48d4e01545a70223740.hip
// !!! This is a file automatically generated by hipify!!! #include "smoothing.h" #include <iostream> #include <algorithm> #include <math.h> #include <cstring> #include "hip/hip_runtime.h" #include "hip/device_functions.h" #include "device_launch_parameters.h" #include <thrust/sort.h> #define BLOCKSIZE 32 hpcparallel::smoothing::smoothing(int resolution, int binsize, int* bins, int filtersize) : resolution(resolution), binsize(binsize), bins(bins), filtersize(filtersize) { window = new int[filtersize * filtersize]; memset(window, 0, filtersize * filtersize * sizeof(int)); filteredBins = new int[binsize]; //memset(filteredBins, 0, binsize * sizeof(int)); } __device__ void sort(int window[], int length) { for (int i = 0; i < length; ++i){ for (int j = i + 1; j < length; ++j){ if (window[i] > window[j]) { int temp = window[i]; window[i] = window[j]; window[j] = temp; } } } } __global__ void medianFilter3x3Kernel(const int* dev_bins, int* dev_filteredBins, int resolution, int binsize) { int tx = blockDim.x * blockIdx.x + threadIdx.x; int ty = blockDim.y * blockIdx.y + threadIdx.y; int tlx = threadIdx.x; int tly = threadIdx.y; if (tx < resolution && ty < resolution) { //edge values for shared memory are values needed by the window but whose median is not calculated in this block. //shared array is 1 row/column bigger on every side for edge cases. __shared__ int sm_bins[BLOCKSIZE + 2][BLOCKSIZE + 2]; //populate shared memory block values which are not edge values sm_bins[tly + 1][tlx + 1] = dev_bins[ty*resolution + tx]; //check if index on the edge of the block bool tx_left_edge = (tlx == 0); bool ty_top_edge = (tly == 0); bool tx_right_edge = (tlx == BLOCKSIZE - 1); bool ty_bot_edge = (tly == BLOCKSIZE - 1); // -1 padding for values not in dev_bins if (tx_left_edge) sm_bins[tly + 1][tlx] = -1; else if (tx_right_edge) sm_bins[tly + 1][tlx + 2] = -1; if (ty_top_edge) { sm_bins[tly][tlx + 1] = -1; if (tx_left_edge) sm_bins[tly][tlx] = -1; else if (tx_right_edge) sm_bins[tly][tlx + 2] = -1; } else if (ty_bot_edge) { sm_bins[tly + 2][tlx + 1] = -1; if (tx_left_edge) sm_bins[tly + 2][tlx] = -1; else if (tx_right_edge) sm_bins[tly + 2][tlx + 2] = -1; } //check if shared memory edge is global edge and then don't include it tx_left_edge &= (tx > 0); tx_right_edge &= (tx < resolution - 1); ty_top_edge &= (ty > 0); ty_bot_edge &= (ty < resolution - 1); // pull edge values into shared memory using threads on edges of block if (tx_left_edge) sm_bins[tly + 1][tlx] = dev_bins[ty*resolution + tx - 1]; else if (tx_right_edge) sm_bins[tly + 1][tlx + 2] = dev_bins[ty*resolution + tx + 1]; if (ty_top_edge) { sm_bins[tly][tlx + 1] = dev_bins[(ty - 1)*resolution + tx]; if (tx_left_edge) sm_bins[tly][tlx] = dev_bins[(ty - 1)*resolution + tx - 1]; else if (tx_right_edge) sm_bins[tly][tlx + 2] = dev_bins[(ty - 1)*resolution + tx + 1]; } else if (ty_bot_edge) { sm_bins[tly + 2][tlx + 1] = dev_bins[(ty + 1)*resolution + tx]; if (tx_left_edge) sm_bins[tly + 2][tlx] = dev_bins[(ty + 1)*resolution + tx - 1]; else if (tx_right_edge) sm_bins[tly + 2][tlx + 2] = dev_bins[(ty + 1)*resolution + tx + 1]; } __syncthreads(); int window[9] = { sm_bins[tly][tlx], sm_bins[tly][tlx + 1], sm_bins[tly][tlx + 2], sm_bins[tly + 1][tlx], sm_bins[tly + 1][tlx + 1], sm_bins[tly + 1][tlx + 2], sm_bins[tly + 2][tlx], sm_bins[tly + 2][tlx + 1], sm_bins[tly + 2][tlx + 2] }; //thrust::sort(thrust::seq, window, window + 9); //sort sort(window, 9); int edges = 0; // count number of elements equal to -1, values which are not in the global array for 
(int i = 0; i < 9; ++i) { if (window[i] == -1) edges++; } // the median is calculated only with values actually inside the array, values outside array are -1 // for even number of values in window calculate average between the 2 middle values int median; int mi = (9 - edges); if (mi % 2 == 0) { //throwing away decimal part of average median = ((window[(mi / 2) - 1 + edges] + window[(mi / 2) + edges]) / 2); } else { median = window[(mi / 2) + edges]; } dev_filteredBins[ty * resolution + tx] = median; } } template<int WINDOWSIZE, int FILTERSIZE, int EDGESIZE> __global__ void medianFilterTemplateKernel(const int* dev_bins, int* dev_filteredBins, int resolution, int binsize) { int tx = blockDim.x * blockIdx.x + threadIdx.x; int ty = blockDim.y * blockIdx.y + threadIdx.y; int tlx = threadIdx.x; int tly = threadIdx.y; // edge size is Filtersize/2 integer division, half the length of the window excluding the value we calculating in the middle if (tx < resolution && ty < resolution) { __shared__ int sm_bins[BLOCKSIZE + FILTERSIZE - 1][BLOCKSIZE + FILTERSIZE - 1]; //populate shared memory //first populate values which fall inside the block (not edge values) sm_bins[tly + EDGESIZE][tlx + EDGESIZE] = dev_bins[ty*resolution + tx]; //check if index on the edge of the block // in this case the edge of the block is the border of the block with thickness equal to edgesize bool tx_left_edge = (tlx < EDGESIZE); bool ty_top_edge = (tly < EDGESIZE); bool tx_right_edge = (tlx > BLOCKSIZE - EDGESIZE - 1); bool ty_bot_edge = (tly > BLOCKSIZE - EDGESIZE - 1); // -1 padding for edge values if (tx_left_edge) sm_bins[tly + EDGESIZE][tlx] = -1; else if (tx_right_edge) sm_bins[tly + EDGESIZE][tlx + (2 * EDGESIZE)] = -1; if (ty_top_edge) { sm_bins[tly][tlx + EDGESIZE] = -1; if (tx_left_edge) sm_bins[tly][tlx] = -1; else if (tx_right_edge) sm_bins[tly][tlx + (2 * EDGESIZE)] = -1; } else if (ty_bot_edge) { sm_bins[tly + (2 * EDGESIZE)][tlx + EDGESIZE] = -1; if (tx_left_edge) sm_bins[tly + (2 * EDGESIZE)][tlx] = -1; else if (tx_right_edge) sm_bins[tly + (2 * EDGESIZE)][tlx + (2 * EDGESIZE)] = -1; } //check if shared memory edge is global edge and then don't include it tx_left_edge &= (tx - EDGESIZE > 0); tx_right_edge &= (tx + EDGESIZE < resolution); ty_top_edge &= (ty - EDGESIZE > 0); ty_bot_edge &= (ty + EDGESIZE < resolution); //populate edge values if (tx_left_edge) sm_bins[tly + EDGESIZE][tlx] = dev_bins[ty*resolution + tx - EDGESIZE]; else if (tx_right_edge) sm_bins[tly + EDGESIZE][tlx + (2 * EDGESIZE)] = dev_bins[ty*resolution + tx + EDGESIZE]; if (ty_top_edge) { sm_bins[tly][tlx + EDGESIZE] = dev_bins[(ty - EDGESIZE)*resolution + tx]; if (tx_left_edge) sm_bins[tly][tlx] = dev_bins[(ty - EDGESIZE)*resolution + tx - EDGESIZE]; else if (tx_right_edge) sm_bins[tly][tlx + (2 * EDGESIZE)] = dev_bins[(ty - EDGESIZE)*resolution + tx + EDGESIZE]; } else if (ty_bot_edge) { sm_bins[tly + (2 * EDGESIZE)][tlx + EDGESIZE] = dev_bins[(ty + EDGESIZE)*resolution + tx]; if (tx_left_edge) sm_bins[tly + (2 * EDGESIZE)][tlx] = dev_bins[(ty + EDGESIZE)*resolution + tx - EDGESIZE]; else if (tx_right_edge) sm_bins[tly + (2 * EDGESIZE)][tlx + (2 * EDGESIZE)] = dev_bins[(ty + EDGESIZE)*resolution + tx + EDGESIZE]; } __syncthreads(); // create window for this thread index int window[WINDOWSIZE]; int startx = tlx; int starty = tly; int endx = tlx + FILTERSIZE; int endy = tly + FILTERSIZE; int i = 0; for (int y = starty; y < endy; ++y) { for (int x = startx; x < endx; ++x) { window[i] = sm_bins[y][x]; i++; } } //sort window thrust for some 
reason gives an error when using filtersize of 13 and up. //simple sort below appears to be faster than thrust sort. //thrust::sort(thrust::seq, window, window + WINDOWSIZE); //sort sort(window, WINDOWSIZE); int edges = 0; // count number of elements equal to -1, values which are not in the global array for (int i = 0; i < WINDOWSIZE; ++i) { if (window[i] == -1) edges++; } // the median is calculated only with values actually inside the array, values outside array are -1 // for even number of values in window calculate average between the 2 middle values int median; int mi = (WINDOWSIZE - edges); if (mi % 2 == 0) { //throwing away decimal part of average median = ((window[(mi / 2) - 1 + edges] + window[(mi / 2) + edges]) / 2); } else { median = window[(mi / 2) + edges]; } dev_filteredBins[ty * resolution + tx] = median; } } #define cudaSafe(statuscode, description) { gpuAssert(statuscode, description, __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char* description, const char *file, int line, bool abort=true) { if (code != hipSuccess) { std::cout <<"Cuda error: " << description << ", " << hipGetErrorString(code) << " " << file << " " << line << std::endl;; // if (abort) exit(code); } } int* hpcparallel::smoothing::applyFilter(float* kernelTime) { //struct hipFuncAttributes funcAttrib; //cudaSafe(hipFuncGetAttributes(&funcAttrib, medianFilterTemplateKernel<20>), "cudafuncgetattributes"); //printf("%s numRegs=%d\n", "medianFilterTemplateKernel", funcAttrib.numRegs); int* dev_bins = 0; int* dev_filteredBins = 0; cudaMedianFilter(dev_bins, dev_filteredBins, kernelTime); cudaSafe(hipDeviceReset(), "cuda device reset"); return filteredBins; } void hpcparallel::smoothing::cudaMedianFilter(int* dev_bins, int* dev_filteredBins, float* kernelTime) { hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); cudaSafe(hipSetDevice(0), "set device"); cudaSafe(hipMalloc((void**)&dev_bins, binsize * sizeof(int)), "cuda malloc dev_bins"); cudaSafe(hipMalloc((void**)&dev_filteredBins, binsize * sizeof(int)), "cuda malloc dev_filteredBins"); cudaSafe(hipMemcpy(dev_bins, bins, binsize * sizeof(int), hipMemcpyHostToDevice), "cuda memcpy htd dev_bins"); cudaSafe(hipMemset(dev_filteredBins, 0, binsize * sizeof(int)), "cuda memset dev_filteredBins"); dim3 numThreads(BLOCKSIZE, BLOCKSIZE); dim3 numBlocks; numBlocks.x = (int)ceil(resolution / (float)numThreads.x); numBlocks.y = (int)ceil(resolution / (float)numThreads.y); //std::cout << "numBlocks & numThreads: " << numBlocks.x << " " << numThreads.x << " " << numBlocks.x * numThreads.x << " " << resolution << std::endl; //std::cout << "numBlocks & numThreads: " << numBlocks.y << " " << numThreads.y << " " << numBlocks.y * numThreads.y << " " << resolution << std::endl; //medianFilterKernel << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize, filtersize, int(filtersize / 2), filtersize*filtersize); hipEventRecord(start); switch (filtersize) { case 3: medianFilter3x3Kernel << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 5: medianFilterTemplateKernel<25, 5, 2> << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 7: medianFilterTemplateKernel<49, 7, 3> << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 9: medianFilterTemplateKernel<81, 9, 4> << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 11: medianFilterTemplateKernel<121, 11, 5> << 
<numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 13: medianFilterTemplateKernel<169, 13, 6> << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 15: medianFilterTemplateKernel<225, 15, 7> << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 17: medianFilterTemplateKernel<289, 17, 8> << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 19: medianFilterTemplateKernel<361, 19, 9> << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 21: medianFilterTemplateKernel<441, 21, 10> << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; } hipEventRecord(stop); cudaSafe(hipGetLastError(), "cuda launch"); cudaSafe(hipDeviceSynchronize(), "cuda device synchronize"); cudaSafe(hipMemcpy(filteredBins, dev_filteredBins, binsize * sizeof(int), hipMemcpyDeviceToHost), "cuda memcpy dth dev_filteredBins"); cudaSafe(hipFree(dev_bins), "cuda free"); cudaSafe(hipFree(dev_filteredBins), "cuda free"); hipEventSynchronize(stop); hipEventElapsedTime(kernelTime, start, stop); } hpcparallel::smoothing::~smoothing() { delete window; delete filteredBins; }
ddb24e8bd449af0e0ceeb48d4e01545a70223740.cu
#include "smoothing.h" #include <iostream> #include <algorithm> #include <math.h> #include <cstring> #include "cuda_runtime.h" #include "device_functions.h" #include "device_launch_parameters.h" #include <thrust/sort.h> #define BLOCKSIZE 32 hpcparallel::smoothing::smoothing(int resolution, int binsize, int* bins, int filtersize) : resolution(resolution), binsize(binsize), bins(bins), filtersize(filtersize) { window = new int[filtersize * filtersize]; memset(window, 0, filtersize * filtersize * sizeof(int)); filteredBins = new int[binsize]; //memset(filteredBins, 0, binsize * sizeof(int)); } __device__ void sort(int window[], int length) { for (int i = 0; i < length; ++i){ for (int j = i + 1; j < length; ++j){ if (window[i] > window[j]) { int temp = window[i]; window[i] = window[j]; window[j] = temp; } } } } __global__ void medianFilter3x3Kernel(const int* dev_bins, int* dev_filteredBins, int resolution, int binsize) { int tx = blockDim.x * blockIdx.x + threadIdx.x; int ty = blockDim.y * blockIdx.y + threadIdx.y; int tlx = threadIdx.x; int tly = threadIdx.y; if (tx < resolution && ty < resolution) { //edge values for shared memory are values needed by the window but whose median is not calculated in this block. //shared array is 1 row/column bigger on every side for edge cases. __shared__ int sm_bins[BLOCKSIZE + 2][BLOCKSIZE + 2]; //populate shared memory block values which are not edge values sm_bins[tly + 1][tlx + 1] = dev_bins[ty*resolution + tx]; //check if index on the edge of the block bool tx_left_edge = (tlx == 0); bool ty_top_edge = (tly == 0); bool tx_right_edge = (tlx == BLOCKSIZE - 1); bool ty_bot_edge = (tly == BLOCKSIZE - 1); // -1 padding for values not in dev_bins if (tx_left_edge) sm_bins[tly + 1][tlx] = -1; else if (tx_right_edge) sm_bins[tly + 1][tlx + 2] = -1; if (ty_top_edge) { sm_bins[tly][tlx + 1] = -1; if (tx_left_edge) sm_bins[tly][tlx] = -1; else if (tx_right_edge) sm_bins[tly][tlx + 2] = -1; } else if (ty_bot_edge) { sm_bins[tly + 2][tlx + 1] = -1; if (tx_left_edge) sm_bins[tly + 2][tlx] = -1; else if (tx_right_edge) sm_bins[tly + 2][tlx + 2] = -1; } //check if shared memory edge is global edge and then don't include it tx_left_edge &= (tx > 0); tx_right_edge &= (tx < resolution - 1); ty_top_edge &= (ty > 0); ty_bot_edge &= (ty < resolution - 1); // pull edge values into shared memory using threads on edges of block if (tx_left_edge) sm_bins[tly + 1][tlx] = dev_bins[ty*resolution + tx - 1]; else if (tx_right_edge) sm_bins[tly + 1][tlx + 2] = dev_bins[ty*resolution + tx + 1]; if (ty_top_edge) { sm_bins[tly][tlx + 1] = dev_bins[(ty - 1)*resolution + tx]; if (tx_left_edge) sm_bins[tly][tlx] = dev_bins[(ty - 1)*resolution + tx - 1]; else if (tx_right_edge) sm_bins[tly][tlx + 2] = dev_bins[(ty - 1)*resolution + tx + 1]; } else if (ty_bot_edge) { sm_bins[tly + 2][tlx + 1] = dev_bins[(ty + 1)*resolution + tx]; if (tx_left_edge) sm_bins[tly + 2][tlx] = dev_bins[(ty + 1)*resolution + tx - 1]; else if (tx_right_edge) sm_bins[tly + 2][tlx + 2] = dev_bins[(ty + 1)*resolution + tx + 1]; } __syncthreads(); int window[9] = { sm_bins[tly][tlx], sm_bins[tly][tlx + 1], sm_bins[tly][tlx + 2], sm_bins[tly + 1][tlx], sm_bins[tly + 1][tlx + 1], sm_bins[tly + 1][tlx + 2], sm_bins[tly + 2][tlx], sm_bins[tly + 2][tlx + 1], sm_bins[tly + 2][tlx + 2] }; //thrust::sort(thrust::seq, window, window + 9); //sort sort(window, 9); int edges = 0; // count number of elements equal to -1, values which are not in the global array for (int i = 0; i < 9; ++i) { if (window[i] == -1) edges++; } // the 
median is calculated only with values actually inside the array, values outside array are -1 // for even number of values in window calculate average between the 2 middle values int median; int mi = (9 - edges); if (mi % 2 == 0) { //throwing away decimal part of average median = ((window[(mi / 2) - 1 + edges] + window[(mi / 2) + edges]) / 2); } else { median = window[(mi / 2) + edges]; } dev_filteredBins[ty * resolution + tx] = median; } } template<int WINDOWSIZE, int FILTERSIZE, int EDGESIZE> __global__ void medianFilterTemplateKernel(const int* dev_bins, int* dev_filteredBins, int resolution, int binsize) { int tx = blockDim.x * blockIdx.x + threadIdx.x; int ty = blockDim.y * blockIdx.y + threadIdx.y; int tlx = threadIdx.x; int tly = threadIdx.y; // edge size is Filtersize/2 integer division, half the length of the window excluding the value we calculating in the middle if (tx < resolution && ty < resolution) { __shared__ int sm_bins[BLOCKSIZE + FILTERSIZE - 1][BLOCKSIZE + FILTERSIZE - 1]; //populate shared memory //first populate values which fall inside the block (not edge values) sm_bins[tly + EDGESIZE][tlx + EDGESIZE] = dev_bins[ty*resolution + tx]; //check if index on the edge of the block // in this case the edge of the block is the border of the block with thickness equal to edgesize bool tx_left_edge = (tlx < EDGESIZE); bool ty_top_edge = (tly < EDGESIZE); bool tx_right_edge = (tlx > BLOCKSIZE - EDGESIZE - 1); bool ty_bot_edge = (tly > BLOCKSIZE - EDGESIZE - 1); // -1 padding for edge values if (tx_left_edge) sm_bins[tly + EDGESIZE][tlx] = -1; else if (tx_right_edge) sm_bins[tly + EDGESIZE][tlx + (2 * EDGESIZE)] = -1; if (ty_top_edge) { sm_bins[tly][tlx + EDGESIZE] = -1; if (tx_left_edge) sm_bins[tly][tlx] = -1; else if (tx_right_edge) sm_bins[tly][tlx + (2 * EDGESIZE)] = -1; } else if (ty_bot_edge) { sm_bins[tly + (2 * EDGESIZE)][tlx + EDGESIZE] = -1; if (tx_left_edge) sm_bins[tly + (2 * EDGESIZE)][tlx] = -1; else if (tx_right_edge) sm_bins[tly + (2 * EDGESIZE)][tlx + (2 * EDGESIZE)] = -1; } //check if shared memory edge is global edge and then don't include it tx_left_edge &= (tx - EDGESIZE > 0); tx_right_edge &= (tx + EDGESIZE < resolution); ty_top_edge &= (ty - EDGESIZE > 0); ty_bot_edge &= (ty + EDGESIZE < resolution); //populate edge values if (tx_left_edge) sm_bins[tly + EDGESIZE][tlx] = dev_bins[ty*resolution + tx - EDGESIZE]; else if (tx_right_edge) sm_bins[tly + EDGESIZE][tlx + (2 * EDGESIZE)] = dev_bins[ty*resolution + tx + EDGESIZE]; if (ty_top_edge) { sm_bins[tly][tlx + EDGESIZE] = dev_bins[(ty - EDGESIZE)*resolution + tx]; if (tx_left_edge) sm_bins[tly][tlx] = dev_bins[(ty - EDGESIZE)*resolution + tx - EDGESIZE]; else if (tx_right_edge) sm_bins[tly][tlx + (2 * EDGESIZE)] = dev_bins[(ty - EDGESIZE)*resolution + tx + EDGESIZE]; } else if (ty_bot_edge) { sm_bins[tly + (2 * EDGESIZE)][tlx + EDGESIZE] = dev_bins[(ty + EDGESIZE)*resolution + tx]; if (tx_left_edge) sm_bins[tly + (2 * EDGESIZE)][tlx] = dev_bins[(ty + EDGESIZE)*resolution + tx - EDGESIZE]; else if (tx_right_edge) sm_bins[tly + (2 * EDGESIZE)][tlx + (2 * EDGESIZE)] = dev_bins[(ty + EDGESIZE)*resolution + tx + EDGESIZE]; } __syncthreads(); // create window for this thread index int window[WINDOWSIZE]; int startx = tlx; int starty = tly; int endx = tlx + FILTERSIZE; int endy = tly + FILTERSIZE; int i = 0; for (int y = starty; y < endy; ++y) { for (int x = startx; x < endx; ++x) { window[i] = sm_bins[y][x]; i++; } } //sort window thrust for some reason gives an error when using filtersize of 13 and up. 
//simple sort below appears to be faster than thrust sort. //thrust::sort(thrust::seq, window, window + WINDOWSIZE); //sort sort(window, WINDOWSIZE); int edges = 0; // count number of elements equal to -1, values which are not in the global array for (int i = 0; i < WINDOWSIZE; ++i) { if (window[i] == -1) edges++; } // the median is calculated only with values actually inside the array, values outside array are -1 // for even number of values in window calculate average between the 2 middle values int median; int mi = (WINDOWSIZE - edges); if (mi % 2 == 0) { //throwing away decimal part of average median = ((window[(mi / 2) - 1 + edges] + window[(mi / 2) + edges]) / 2); } else { median = window[(mi / 2) + edges]; } dev_filteredBins[ty * resolution + tx] = median; } } #define cudaSafe(statuscode, description) { gpuAssert(statuscode, description, __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char* description, const char *file, int line, bool abort=true) { if (code != cudaSuccess) { std::cout <<"Cuda error: " << description << ", " << cudaGetErrorString(code) << " " << file << " " << line << std::endl;; // if (abort) exit(code); } } int* hpcparallel::smoothing::applyFilter(float* kernelTime) { //struct cudaFuncAttributes funcAttrib; //cudaSafe(cudaFuncGetAttributes(&funcAttrib, medianFilterTemplateKernel<20>), "cudafuncgetattributes"); //printf("%s numRegs=%d\n", "medianFilterTemplateKernel", funcAttrib.numRegs); int* dev_bins = 0; int* dev_filteredBins = 0; cudaMedianFilter(dev_bins, dev_filteredBins, kernelTime); cudaSafe(cudaDeviceReset(), "cuda device reset"); return filteredBins; } void hpcparallel::smoothing::cudaMedianFilter(int* dev_bins, int* dev_filteredBins, float* kernelTime) { cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); cudaSafe(cudaSetDevice(0), "set device"); cudaSafe(cudaMalloc((void**)&dev_bins, binsize * sizeof(int)), "cuda malloc dev_bins"); cudaSafe(cudaMalloc((void**)&dev_filteredBins, binsize * sizeof(int)), "cuda malloc dev_filteredBins"); cudaSafe(cudaMemcpy(dev_bins, bins, binsize * sizeof(int), cudaMemcpyHostToDevice), "cuda memcpy htd dev_bins"); cudaSafe(cudaMemset(dev_filteredBins, 0, binsize * sizeof(int)), "cuda memset dev_filteredBins"); dim3 numThreads(BLOCKSIZE, BLOCKSIZE); dim3 numBlocks; numBlocks.x = (int)ceil(resolution / (float)numThreads.x); numBlocks.y = (int)ceil(resolution / (float)numThreads.y); //std::cout << "numBlocks & numThreads: " << numBlocks.x << " " << numThreads.x << " " << numBlocks.x * numThreads.x << " " << resolution << std::endl; //std::cout << "numBlocks & numThreads: " << numBlocks.y << " " << numThreads.y << " " << numBlocks.y * numThreads.y << " " << resolution << std::endl; //medianFilterKernel << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize, filtersize, int(filtersize / 2), filtersize*filtersize); cudaEventRecord(start); switch (filtersize) { case 3: medianFilter3x3Kernel << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 5: medianFilterTemplateKernel<25, 5, 2> << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 7: medianFilterTemplateKernel<49, 7, 3> << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 9: medianFilterTemplateKernel<81, 9, 4> << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 11: medianFilterTemplateKernel<121, 11, 5> << <numBlocks, numThreads >> >(dev_bins, 
dev_filteredBins, resolution, binsize); break; case 13: medianFilterTemplateKernel<169, 13, 6> << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 15: medianFilterTemplateKernel<225, 15, 7> << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 17: medianFilterTemplateKernel<289, 17, 8> << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 19: medianFilterTemplateKernel<361, 19, 9> << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; case 21: medianFilterTemplateKernel<441, 21, 10> << <numBlocks, numThreads >> >(dev_bins, dev_filteredBins, resolution, binsize); break; } cudaEventRecord(stop); cudaSafe(cudaGetLastError(), "cuda launch"); cudaSafe(cudaDeviceSynchronize(), "cuda device synchronize"); cudaSafe(cudaMemcpy(filteredBins, dev_filteredBins, binsize * sizeof(int), cudaMemcpyDeviceToHost), "cuda memcpy dth dev_filteredBins"); cudaSafe(cudaFree(dev_bins), "cuda free"); cudaSafe(cudaFree(dev_filteredBins), "cuda free"); cudaEventSynchronize(stop); cudaEventElapsedTime(kernelTime, start, stop); } hpcparallel::smoothing::~smoothing() { delete window; delete filteredBins; }
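medianFilterTemplateKernel above declares its shared tile as [BLOCKSIZE + FILTERSIZE - 1] per side: the block plus an EDGESIZE = FILTERSIZE/2 halo on each edge, which coincide because every supported FILTERSIZE is odd. A small compile-time check of that arithmetic; the helper is ours, not part of the file.

// Sketch: shared tile side = block side + two halos of FILTERSIZE / 2.
constexpr int tileSide(int blockSize, int filterSize)
{
	return blockSize + 2 * (filterSize / 2);	// == blockSize + filterSize - 1 for odd filterSize
}
static_assert(tileSide(32, 3) == 34 && tileSide(32, 21) == 52,
              "matches the BLOCKSIZE + FILTERSIZE - 1 tile used by the kernel");

To sanity-check applyFilter, a hedged CPU reference with the window semantics the kernels document: out-of-range neighbours are excluded (the GPU's -1 padding), and an even-sized surviving window is averaged with truncating integer division. Function and variable names here are illustrative, not from the file.

// Hypothetical CPU reference for the GPU median filter above.
#include <algorithm>
#include <vector>

void medianFilterReference(const std::vector<int>& in, std::vector<int>& out,
                           int resolution, int filtersize)
{
	const int edge = filtersize / 2;
	for (int y = 0; y < resolution; ++y)
		for (int x = 0; x < resolution; ++x) {
			std::vector<int> window;
			for (int dy = -edge; dy <= edge; ++dy)
				for (int dx = -edge; dx <= edge; ++dx) {
					int yy = y + dy, xx = x + dx;
					if (yy >= 0 && yy < resolution && xx >= 0 && xx < resolution)
						window.push_back(in[yy * resolution + xx]);	// skip what the GPU pads with -1
				}
			std::sort(window.begin(), window.end());
			int m = (int)window.size();
			out[y * resolution + x] = (m % 2 == 0)
				? (window[m / 2 - 1] + window[m / 2]) / 2	// average of the two middle values
				: window[m / 2];
		}
}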
ff2fbf90cdc53f74cbf3a1a7d792486c4b1c1358.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /** * @brief * utils * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include "k2/csrc/macros.h" #include "k2/csrc/math.h" #include "k2/csrc/nvtx.h" #include "k2/csrc/utils.h" namespace k2 { // See FillValues() where this is invoked. It fills a region with // a constant value. __global__ void FillValuesKernel(int32_t *data, int32_t num_values, int32_t value) { int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x), stride = (gridDim.x * blockDim.x); for (; job_idx < num_values; job_idx += stride) data[job_idx] = value; } // This launches a kernel. It's the same as doing: // for (int32_t i = 0; i < num_values; i++) data[i] = value; __device__ void FillValues(int32_t *data, int32_t num_values, int32_t value) { int32_t block_size = 256; int32_t grid_size = NumBlocks(num_values, block_size); hipLaunchKernelGGL(( FillValuesKernel), dim3(grid_size), dim3(block_size), 0, 0, data, num_values, value); } // When we invoke this we make a big enough grid that there doesn't have to // be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >= // num_rows __global__ void RowSplitsToRowIdsKernel(int32_t num_rows, int32_t threads_per_row, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x, num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row, thread_this_row = thread % threads_per_row; if (row >= num_rows) return; K2_CHECK_GE(num_threads / threads_per_row, num_rows); int32_t this_row_split = row_splits[row], next_row_split = row_splits[row + 1], row_length = next_row_split - this_row_split; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (row_length / threads_per_row > max_loop) { // We decide that looping too many times will be too slow, so we launch // another kernel to fill in the value for this row. (This is CUDA dynamic // parallelism). if (thread_this_row == 0) { FillValues(row_ids + this_row_split, row_length, row); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_row < row_length; thread_this_row += threads_per_row) row_ids[this_row_split + thread_this_row] = row; } } /* See declaration of RowSplitsToRowIds() in utils.h. These are implementation notes. Suppose the range we need to fill with a particular number (say, x) is from 1010 to 10000 inclusive (binary) The first kernel writes x to positions 1010, 1100, 10000; the significance of that sequence is we keep adding the smallest number we can add to get another zero at the end of the binary representation, until we exceed the range we're supposed to fill. The second kernel: for a given index into x that is must fill (say, 1111), it asks "is the index currently here already the right one?", which it can test using the function is_valid_index() below; if it's not already correct, it searches in a sequence of positions: 1110, 1100, 1000, 0000, like our sequence above but going downwards, again getting more zeros at the end of the binary representation, until it finds the correct value in the array at the searched position; then it copies the discovered value the original position requested (here, 1111). 
First kernel pseudocode: for each index 'i' into 't', it does: for (int32_t n=0, j = t[i]; j < t[i+1]; n++) { x[j] = i; if (j & (1<<n)) j += (1 << n); } Second kernel pseudocode: for each element of x, it searches for the right index. Suppose we're given num_indexes == length(n) == length(t) - 1. Define is_valid_index as follows: // returns true if j is the value that we should be putting at position 'i' in x: // that is, if t[j] <= i < t[j+1]. bool is_valid_index(i, j) { return (j >= 0 && j < num_indexes && t[j] <= i && i < t[j+1]); } // We suppose we are given i (the position into x that we're responsible for // setting: orig_i = i; for (int32_t n=0; !is_valid_index(i, x[i]); n++) { if (i & (1<<n)) i -= (1 << n); } x[orig_i] = x[i]; */ void RowSplitsToRowIds(ContextPtr c, int32_t num_rows, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { NVTX_RANGE(K2_FUNC); if (num_rows <= 0 || num_elems <= 0) return; DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row_start = row_splits[0]; K2_CHECK_EQ(cur_row_start, 0); K2_CHECK_EQ(row_splits[num_rows], num_elems); for (int32_t row = 0; row < num_rows; ++row) { int32_t next_row_start = row_splits[row + 1]; for (; cur_row_start < next_row_start; ++cur_row_start) row_ids[cur_row_start] = row; } } else { K2_CHECK_EQ(d, kCuda); if (1) { // TODO: compare this for speed with the other branch. This is branch is // much simpler, and will be considerably faster for "normal" cases -> // probably preferred. int32_t avg_elems_per_row = (num_elems + num_rows - 1) / num_rows, threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row), tot_threads = num_rows * threads_per_row; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); hipLaunchKernelGGL(( K2_CUDA_SAFE_CALL(RowSplitsToRowIdsKernel), dim3(grid_size), dim3(block_size), 0, c->GetCudaStream(), num_rows, threads_per_row, row_splits, num_elems, row_ids)); } else { // TODO: Will probably just delete this branch at some point. // The following algorithm isn't particularly adapted to GPU hardware in // terms of coalesced reads and writes and so on, but it has reasonable // asymptotic time complexity (assuming all kernels run in parallel), // specifically: O(log(largest(row_splits[i+1]-row_splits[i]))) auto lambda_init_minus_one = [=] __host__ __device__(int32_t i) { row_ids[i] = -1; }; Eval(c, num_elems + 1, lambda_init_minus_one); auto lambda_phase_one = [=] __host__ __device__(int32_t i) { int32_t this_row_split = row_splits[i], next_row_split = (i < num_rows ? row_splits[i + 1] : this_row_split + 1); if (this_row_split < next_row_split) row_ids[this_row_split] = i; // we have to fill in row_ids[this_row_split], // row_ids[this_row_split+1]... row_ids[next_row_split-1] with the same // value but that could be a long loop. Instead we write at // this_row_split and all indexes this_row_split < i < next_row_split // such that i is the result of rounding up this_row_split to // (something)*2^n, for n = 1, 2, 3, ... this will take time logarithmic // in (next_row_split - this_row_split). we can then fill in the gaps // with a logarithmic-time loop, by looking for a value that's not (-1) // by rounding the current index down to successively higher powers // of 2. for (int32_t power = 0, j = this_row_split; j + (1 << power) < next_row_split; power++) { if (j & (1 << power)) { j += (1 << power); // we know that j is now < next_row_split, because we checked "j + // (1<<power) < next_row_split" in the loop condition. 
// Note, we don't want a loop-within-a-loop because of how SIMT // works... row_ids[j] = i; } } }; Eval(c, num_elems + 1, lambda_phase_one); auto lambda_phase_two = [=] __host__ __device__(int32_t j) { int32_t row_index = row_ids[j]; if (row_index != -1) return; int32_t power = 0, j2 = j; for (; row_index != -1; power++) { if (j2 & (1 << power)) { j2 -= (1 << power); row_index = row_ids[j2]; } assert(power < 31); } row_ids[j] = row_ids[j2]; }; // could do the next line for num_elems+1, but the element at `num_elems` // will already be set. Eval(c, num_elems, lambda_phase_two); } } } /* When we invoke this we make a big enough grid that there doesn't have to be a loop over elements, i.e. (gridDim.x * blockDim.x) / threads_per_elem > num_elems. (must be >=, because we imagine a phantom element at [num_elems] with the value `num_rows`.) @param [in] num_elems Number of elements in ragged matrix @param [in] threads_per_elem Number of threads we allocate per element. Must be >= 1. @param [in] row_ids The row_ids vector, of length `num_elems`; must be nonnegative and non-decreasing and all elements < num_rows. @param [in] num_rows Number of rows, must be greater than the largest (== last) element of `row_ids`. @param [out] row_splits This kernel will output a non-decreasing vector of length num_rows + 1, such that row_splits[0] == 0, row_splits[num_rows] == num_elems, and row_splits[row_ids[i]] <= i < row_splits[row_ids[i]+1] */ __global__ void RowIdsToRowSplitsKernel(int32_t num_elems, int32_t threads_per_elem, const int32_t *row_ids, int32_t num_rows, int32_t *row_splits) { int32_t thread = (blockIdx.x * blockDim.x + threadIdx.x), num_threads = gridDim.x * blockDim.x, elem = thread / threads_per_elem, thread_this_elem = thread % threads_per_elem; K2_CHECK_GE(num_threads / threads_per_elem, num_elems); if (elem > num_elems) return; int32_t this_row, prev_row; if (elem == 0) { prev_row = -1; this_row = row_ids[elem]; } else if (elem == num_elems) { prev_row = row_ids[elem - 1]; this_row = num_rows; } else { prev_row = row_ids[elem - 1]; this_row = row_ids[elem]; } // `num_splits` is the number of splits we have to write, usually 0 or 1 // but in principle unlimited as there could be empty rows. The // relationship between row_ids and row_splits is more symmetric than // you might expect. int32_t num_splits = this_row - prev_row; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (num_splits / threads_per_elem > max_loop) { if (thread_this_elem == 0) { FillValues(row_splits + prev_row + 1, num_splits, elem); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_elem < num_splits; thread_this_elem += threads_per_elem) row_splits[prev_row + 1 + thread_this_elem] = elem; } } // see declaration in utils.h for documentation. 
void RowIdsToRowSplits(ContextPtr c, int32_t num_elems, const int32_t *row_ids, bool no_empty_rows, int32_t num_rows, int32_t *row_splits) { NVTX_RANGE(K2_FUNC); // process corner case first if (num_elems == 0) { auto lambda_set_values = [=] __host__ __device__(int32_t i) { row_splits[i] = 0; }; Eval(c, num_rows + 1, lambda_set_values); return; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row = -1; for (int32_t i = 0; i < num_elems; i++) { int32_t row = row_ids[i]; K2_CHECK_GE(row, cur_row); while (cur_row < row) { cur_row++; row_splits[cur_row] = i; } } // cur_row must be >= 0 here as num_elems > 0 K2_CHECK_GE(cur_row, 0); while (cur_row < num_rows) { row_splits[++cur_row] = num_elems; } } else { K2_CHECK_EQ(d, kCuda); if (no_empty_rows) { auto lambda_simple = [=] __host__ __device__(int32_t i) { int32_t this_row = row_ids[i], prev_row; if (i > 0) { // (normal case) prev_row = row_ids[i - 1]; } else { // i == 0 row_splits[num_rows] = num_elems; prev_row = -1; } K2_CHECK_LE(this_row, prev_row + 1); // no_empty_rows was asserted by // the user if (this_row > prev_row) { row_splits[this_row] = i; } }; Eval(c, num_elems, lambda_simple); return; } else { // By doing "+ 2" instead of "+ 1" we increase the minimum number of // threads-per-row, which may reduce latency when there are successive // empty rows. Any value >= 1 is correct though. int32_t avg_rows_per_elem = num_rows / num_elems + 2, threads_per_elem = RoundUpToNearestPowerOfTwo(avg_rows_per_elem), tot_threads = (num_elems + 1) * threads_per_elem; // +1 for the last row int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); hipLaunchKernelGGL(( K2_CUDA_SAFE_CALL(RowIdsToRowSplitsKernel), dim3(grid_size), dim3(block_size), 0, c->GetCudaStream(), num_elems, threads_per_elem, row_ids, num_rows, row_splits)); } } } /* Called inside GetTaskRedirect(); see documentation of that in header. Each task with 0 <= task < num_tasks gets allocated `threads_per_job` threads, e.g. threads_per_job = 4 or 16. It's a kind of n-ary search (generalization of binary search) where each branch is handled by a different thread so they can happen in parallel. TODO(dan): there are a lot of opportunities to further optimize this using GPU hardware tricks. The thread-block size this is called with must be jobs_per_block * threads_per_job. */ /* template <int32_t jobs_per_block, int32_t threads_per_job> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { __shared__ int32_t temp[tasks_per_block]; // we do __syncwarp() for synchronization below; we require threads_per_job <= // 32 for this reason. static_assert(threads_per_job >= 2 && threads_per_job <= 32); // We have work to do for 0 <= job_idx < num_tasks, but be careful: job_idx // may be >= num_tasks if num_tasks is small or not a power of two (we don't // return because we need to do __syncwarp()). So we have to avoid out of // bounds memory access. int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x) / threads_per_job; // `branch_idx` is which member we are of the group of the `threads_per_job` threads for this job. int32_t branch_idx = threadIdx.x % threads_per_job; // we assume blockDim.x % threads_per_job == 0 // `temp_idx` is which index in the temporary storage `temp` we are assigned // (one per job). int32_t temp_idx = threadIdx.x / threads_per_job; // TODO: we may at some point decide that row_splits[0] has to be zero. 
int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; if (num_items <= 0) { assert(num_items == 0); // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_job >= 2); if (branch_idx < 2 && job_idx < num_tasks) { TaskRedirect tr { job_idx, 2, branch_idx }; redirect_out[job_idx + branch_idx * num_tasks] = tr; } return; } else if (branch_idx == 0 && job_idx < num_tasks) { // This code writes to the jobs in the first half of the output array, // that are allocated to the same-numbered task. int32_t task_idx = job_idx, this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation); TaskRedirect tr { task_idx, num_jobs_this_task, 0 }; redirect_out[task_idx] = tr; } // Now we have the less-trivial task of assigning the jobs in the 2nd half of the // output array to tasks (these are allocated roughly proportional to the amount // of work to do for that task). // We do the selection by throwing darts at a dart-board, evenly spaced, and seeing which task they correspond // to. There are `num_tasks` darts). // Note: we know dart_location < row_splits_nt because job_idx < num_tasks and // because integer division rounds down. int32_t dart_separation = num_items / num_tasks, dart_location = row_splits0 + job_idx * dart_separation; // OK, from this point the goal is to find a task_idx such that // row_splits[task_idx] <= dart_location < row_splits[task_idx + 1]. // This is guaranteed to exist, as long as job_id < num_tasks. // As long as job_id < num_tasks, we maintain the property that // row_splits[lower_bound] <= dart_location && // (upper_bound > num_tasks || row_splits[upper_bound] > dart_location). // (where upper_bound == lower_bound + range), i.e. they are truly // lower and upper bounds int32_t lower_bound = 0, range = num_tasks; // we are responsible for items lower_bound through // (upper_bound = lower_bound + range) - 1. while (range > threads_per_job) { int32_t upper_bound = lower_bound + range; // We need to narrow the range of `task_idx` that might be the correct one. // We round *up* because we require that task_idx_step * threads_per_job >= // range, so that we cover the entire range. int32_t task_idx_step = (range + threads_per_job - 1) / threads_per_job, // >= 2 my_lower_task_idx = lower_bound + branch_idx * task_idx_step, my_upper_task_idx = my_lower_task_idx + task_idx_step; // The following avoids out-of-bounds memory accesses. if (my_upper_task_idx > upper_bound) my_upper_task_idx = upper_bound; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. if (my_lower_task_idx < num_tasks && row_splits[my_lower_task_idx] <= dart_location && dart_location < row_splits[my_upper_task_idx]) { // I am the "chosen branch" (exactly one will be chosen, as long as // job_idx < num_tasks). 
temp[temp_idx] = branch_idx; } __syncwarp(); int32_t chosen_branch_idx = temp[temp_idx]; lower_bound = lower_bound + chosen_branch_idx * task_idx_step; upper_bound = lower_bound + task_idx_step; range = task_idx_step; // note, we don't limit upper_bound to be <= num_tasks because we need all // threads in the block to go around the while loop the same number of // times. Therefore it's possible that upper_bound > num_tasks. K2_DASSERT(job_idx >= num_tasks || (row_splits[lower_bound] <= dart_location && (upper_bound > num_tasks || row_splits[upper_bound] > dart_location))); // TODO: remove once debugged. } int32_t task_idx = lower_bound + branch_idx; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. // // The check `task_idx < num_tasks` is to avoid out-of-bounds access of row_splits. // The check `job_idx < num_tasks` is to avoid out-of-bounds access of `redirect_out`; // for these out-of-range job_idx values, it's possible for task_idx to have // any value since it may be uninitialized memory. if (task_idx < num_tasks && job_idx < num_tasks) { int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; if (this_row_split <= dart_location && dart_location < next_row_split) { // OK, exactly one branch per job will reach this point. `num_jobs` below // is the number of jobs that will be active for this task. (The "1 // +".. is the job that we assign for each task, one job per task, in the // "first half" of the jobs). The job_id_this_task we're working out // below is the job_id within the second half of the TaskRedirects, // the half that are allocated by throwing darts. int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation), job_idx_this_task = 1 + (dart_location - this_row_split)/dart_separation; K2_CHECK(job_id_this_task < num_jobs_this_task); TaskRedirect tr { task_idx, num_jobs_this_task, job_idx_this_task }; redirect_out[num_tasks + job_idx] = tr; } } } */ /* This is a quite simple implementation of GetTaskRedirect... I had a more complicated one above that had better O(N) performance for hard cases, but this one will handle more normal/smaller cases better, plus is easier to debug. The basic idea is to throw lots of threads at it, i.e. threads_per_task should be, say, twice larger than the average / expected number of jobs per task, so that if a task has lots of jobs it doesn't have to loop too many times. */ template <int32_t threads_per_task> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x; int32_t task_idx = thread / threads_per_task; if (task_idx >= num_tasks) return; // `thread_idx` is which member we are of the group of the `threads_per_job` // threads for this job. int32_t thread_idx = thread % threads_per_task; int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; // the 'num_items' is the // total amount of work to // do, that we want to // distribute fairly evenly. // The idea with `dart_separation` is this: Half of the jobs we allocate to // the corresponding tasks. The other half we allocate by throwing darts onto // the interval [0, num_items - 1], evenly spaced starting from 0, and seeing // which tasks they land in. 
This is somewhat random but it ensures that if // any task has a very large amount of work to do, it will get a roughly // proportionate number of jobs. int32_t dart_separation = num_items / num_tasks; if (dart_separation <= 0) { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_task >= 2, "threads per task must >= 2"); if (thread_idx < 2) { TaskRedirect tr{task_idx, 2, static_cast<uint16_t>(thread_idx)}; redirect_out[task_idx + thread_idx * num_tasks] = tr; } return; } // TODO(dan): IDK how well the hardware combines these memory requests; could // consider loading to shared memory first. int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (min(next_row_split / dart_separation, num_tasks) - min(this_row_split / dart_separation, num_tasks)); // function `min` is from cuda K2_CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = thread_idx; job_id_this_task < num_jobs_this_task; job_id_this_task += threads_per_task) { int32_t job_idx = (job_id_this_task == 0 ? task_idx : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. redirect_out[job_idx] = TaskRedirect{task_idx, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } void GetTaskRedirect(hipStream_t stream, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { NVTX_RANGE(K2_FUNC); if (num_tasks <= 0) return; if (stream == kCudaStreamInvalid) { // there's not much point in using this on CPU as there are better ways // to do things (sequentially), but this can be useful for debugging. // The idea with `dart_separation` is this: Half of the jobs we allocate // to the corresponding tasks. The other half we allocate by throwing // darts onto the interval [0, num_items - 1], evenly spaced starting from // 0, and seeing which tasks they land in. This is somewhat random but it // ensures that if any task has a very large amount of work to do, it will // get a roughly proportionate number of jobs. int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0, dart_separation = num_items / num_tasks; if (dart_separation != 0) { for (int32_t task = 0; task < num_tasks; ++task) { int32_t this_row_split = row_splits[task], next_row_split = row_splits[task + 1]; int32_t num_jobs_this_task = 1 + (::min(next_row_split / dart_separation, num_tasks) - ::min(this_row_split / dart_separation, num_tasks)); K2_CHECK_EQ( static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = (job_id_this_task == 0 ? task : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. 
          redirect_out[job_idx] =
              TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task),
                           static_cast<uint16_t>(job_id_this_task)};
        }
      }
    } else {
      // This is a special case where there is no work to do; we give a trivial
      // assignment of tasks to jobs and return
      for (int32_t task = 0; task < num_tasks; ++task) {
        int32_t num_jobs_this_task = 2;
        for (int32_t job_id_this_task = 0;
             job_id_this_task < num_jobs_this_task; ++job_id_this_task) {
          int32_t job_idx = task + job_id_this_task * num_tasks;
          redirect_out[job_idx] =
              TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task),
                           static_cast<uint16_t>(job_id_this_task)};
        }
      }
    }
  } else {
    // compare 8 to 2, which is the expected number of jobs per task.  having
    // 8 substantially greater than 2 gives a fairly big safety factor.
    // However this is still far from ideal in scenarios where the number of
    // tasks might be highly unbalanced.
    const int32_t threads_per_task = 8,
                  tot_threads = threads_per_task * num_tasks;
    int32_t block_size = 256;
    int32_t grid_size = NumBlocks(tot_threads, block_size);
    K2_CUDA_SAFE_CALL(hipLaunchKernelGGL((GetTaskRedirect<threads_per_task>),
                                         dim3(grid_size), dim3(block_size), 0,
                                         stream, num_tasks, row_splits,
                                         redirect_out));
  }
}

void GetTaskRedirect(ContextPtr &c, int32_t num_tasks,
                     const int32_t *row_splits, TaskRedirect *redirect_out) {
  GetTaskRedirect(c->GetCudaStream(), num_tasks, row_splits, redirect_out);
}

}  // namespace k2
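// Worked example (added for illustration, not from the original k2 sources)
// for GetTaskRedirect(): with num_tasks = 3 and row_splits = [0, 10, 90, 100]
// we get num_items = 100 and dart_separation = 33.  The "first half" entries
// redirect_out[0..2] point at tasks 0..2; the per-task job counts come out as
// 1, 3 and 2, so the "second half" entries are redirect_out[3] and
// redirect_out[4] for task 1 and redirect_out[5] for task 2, giving
// 2 * num_tasks = 6 TaskRedirect entries in total.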
ff2fbf90cdc53f74cbf3a1a7d792486c4b1c1358.cu
/** * @brief * utils * * @copyright * Copyright (c) 2020 Xiaomi Corporation (authors: Daniel Povey, Haowen Qiu) * * @copyright * See LICENSE for clarification regarding multiple authors */ #include <algorithm> #include "k2/csrc/macros.h" #include "k2/csrc/math.h" #include "k2/csrc/nvtx.h" #include "k2/csrc/utils.h" namespace k2 { // See FillValues() where this is invoked. It fills a region with // a constant value. __global__ void FillValuesKernel(int32_t *data, int32_t num_values, int32_t value) { int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x), stride = (gridDim.x * blockDim.x); for (; job_idx < num_values; job_idx += stride) data[job_idx] = value; } // This launches a kernel. It's the same as doing: // for (int32_t i = 0; i < num_values; i++) data[i] = value; __device__ void FillValues(int32_t *data, int32_t num_values, int32_t value) { int32_t block_size = 256; int32_t grid_size = NumBlocks(num_values, block_size); FillValuesKernel<<<grid_size, block_size>>>(data, num_values, value); } // When we invoke this we make a big enough grid that there doesn't have to // be a loop over rows, i.e. (gridDim.x * blockDim.x) / threads_per_row >= // num_rows __global__ void RowSplitsToRowIdsKernel(int32_t num_rows, int32_t threads_per_row, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x, num_threads = gridDim.x * blockDim.x, row = thread / threads_per_row, thread_this_row = thread % threads_per_row; if (row >= num_rows) return; K2_CHECK_GE(num_threads / threads_per_row, num_rows); int32_t this_row_split = row_splits[row], next_row_split = row_splits[row + 1], row_length = next_row_split - this_row_split; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (row_length / threads_per_row > max_loop) { // We decide that looping too many times will be too slow, so we launch // another kernel to fill in the value for this row. (This is CUDA dynamic // parallelism). if (thread_this_row == 0) { FillValues(row_ids + this_row_split, row_length, row); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_row < row_length; thread_this_row += threads_per_row) row_ids[this_row_split + thread_this_row] = row; } } /* See declaration of RowSplitsToRowIds() in utils.h. These are implementation notes. Suppose the range we need to fill with a particular number (say, x) is from 1010 to 10000 inclusive (binary) The first kernel writes x to positions 1010, 1100, 10000; the significance of that sequence is we keep adding the smallest number we can add to get another zero at the end of the binary representation, until we exceed the range we're supposed to fill. The second kernel: for a given index into x that is must fill (say, 1111), it asks "is the index currently here already the right one?", which it can test using the function is_valid_index() below; if it's not already correct, it searches in a sequence of positions: 1110, 1100, 1000, 0000, like our sequence above but going downwards, again getting more zeros at the end of the binary representation, until it finds the correct value in the array at the searched position; then it copies the discovered value the original position requested (here, 1111). First kernel pseudocode: for each index 'i' into 't', it does: for (int32_t n=0, j = t[i]; j < t[i+1]; n++) { x[j] = i; if (j & (1<<n)) j += (1 << n); } Second kernel pseudocode: for each element of x, it searches for the right index. Suppose we're given num_indexes == length(n) == length(t) - 1. 
Define is_valid_index as follows: // returns true if j is the value that we should be putting at position 'i' in x: // that is, if t[j] <= i < t[j+1]. bool is_valid_index(i, j) { return (j >= 0 && j < num_indexes && t[j] <= i && i < t[j+1]); } // We suppose we are given i (the position into x that we're responsible for // setting: orig_i = i; for (int32_t n=0; !is_valid_index(i, x[i]); n++) { if (i & (1<<n)) i -= (1 << n); } x[orig_i] = x[i]; */ void RowSplitsToRowIds(ContextPtr c, int32_t num_rows, const int32_t *row_splits, int32_t num_elems, int32_t *row_ids) { NVTX_RANGE(K2_FUNC); if (num_rows <= 0 || num_elems <= 0) return; DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row_start = row_splits[0]; K2_CHECK_EQ(cur_row_start, 0); K2_CHECK_EQ(row_splits[num_rows], num_elems); for (int32_t row = 0; row < num_rows; ++row) { int32_t next_row_start = row_splits[row + 1]; for (; cur_row_start < next_row_start; ++cur_row_start) row_ids[cur_row_start] = row; } } else { K2_CHECK_EQ(d, kCuda); if (1) { // TODO: compare this for speed with the other branch. This is branch is // much simpler, and will be considerably faster for "normal" cases -> // probably preferred. int32_t avg_elems_per_row = (num_elems + num_rows - 1) / num_rows, threads_per_row = RoundUpToNearestPowerOfTwo(avg_elems_per_row), tot_threads = num_rows * threads_per_row; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(RowSplitsToRowIdsKernel<<<grid_size, block_size, 0, c->GetCudaStream()>>>( num_rows, threads_per_row, row_splits, num_elems, row_ids)); } else { // TODO: Will probably just delete this branch at some point. // The following algorithm isn't particularly adapted to GPU hardware in // terms of coalesced reads and writes and so on, but it has reasonable // asymptotic time complexity (assuming all kernels run in parallel), // specifically: O(log(largest(row_splits[i+1]-row_splits[i]))) auto lambda_init_minus_one = [=] __host__ __device__(int32_t i) { row_ids[i] = -1; }; Eval(c, num_elems + 1, lambda_init_minus_one); auto lambda_phase_one = [=] __host__ __device__(int32_t i) { int32_t this_row_split = row_splits[i], next_row_split = (i < num_rows ? row_splits[i + 1] : this_row_split + 1); if (this_row_split < next_row_split) row_ids[this_row_split] = i; // we have to fill in row_ids[this_row_split], // row_ids[this_row_split+1]... row_ids[next_row_split-1] with the same // value but that could be a long loop. Instead we write at // this_row_split and all indexes this_row_split < i < next_row_split // such that i is the result of rounding up this_row_split to // (something)*2^n, for n = 1, 2, 3, ... this will take time logarithmic // in (next_row_split - this_row_split). we can then fill in the gaps // with a logarithmic-time loop, by looking for a value that's not (-1) // by rounding the current index down to successively higher powers // of 2. for (int32_t power = 0, j = this_row_split; j + (1 << power) < next_row_split; power++) { if (j & (1 << power)) { j += (1 << power); // we know that j is now < next_row_split, because we checked "j + // (1<<power) < next_row_split" in the loop condition. // Note, we don't want a loop-within-a-loop because of how SIMT // works... 
row_ids[j] = i; } } }; Eval(c, num_elems + 1, lambda_phase_one); auto lambda_phase_two = [=] __host__ __device__(int32_t j) { int32_t row_index = row_ids[j]; if (row_index != -1) return; int32_t power = 0, j2 = j; for (; row_index != -1; power++) { if (j2 & (1 << power)) { j2 -= (1 << power); row_index = row_ids[j2]; } assert(power < 31); } row_ids[j] = row_ids[j2]; }; // could do the next line for num_elems+1, but the element at `num_elems` // will already be set. Eval(c, num_elems, lambda_phase_two); } } } /* When we invoke this we make a big enough grid that there doesn't have to be a loop over elements, i.e. (gridDim.x * blockDim.x) / threads_per_elem > num_elems. (must be >=, because we imagine a phantom element at [num_elems] with the value `num_rows`.) @param [in] num_elems Number of elements in ragged matrix @param [in] threads_per_elem Number of threads we allocate per element. Must be >= 1. @param [in] row_ids The row_ids vector, of length `num_elems`; must be nonnegative and non-decreasing and all elements < num_rows. @param [in] num_rows Number of rows, must be greater than the largest (== last) element of `row_ids`. @param [out] row_splits This kernel will output a non-decreasing vector of length num_rows + 1, such that row_splits[0] == 0, row_splits[num_rows] == num_elems, and row_splits[row_ids[i]] <= i < row_splits[row_ids[i]+1] */ __global__ void RowIdsToRowSplitsKernel(int32_t num_elems, int32_t threads_per_elem, const int32_t *row_ids, int32_t num_rows, int32_t *row_splits) { int32_t thread = (blockIdx.x * blockDim.x + threadIdx.x), num_threads = gridDim.x * blockDim.x, elem = thread / threads_per_elem, thread_this_elem = thread % threads_per_elem; K2_CHECK_GE(num_threads / threads_per_elem, num_elems); if (elem > num_elems) return; int32_t this_row, prev_row; if (elem == 0) { prev_row = -1; this_row = row_ids[elem]; } else if (elem == num_elems) { prev_row = row_ids[elem - 1]; this_row = num_rows; } else { prev_row = row_ids[elem - 1]; this_row = row_ids[elem]; } // `num_splits` is the number of splits we have to write, usually 0 or 1 // but in principle unlimited as there could be empty rows. The // relationship between row_ids and row_splits is more symmetric than // you might expect. int32_t num_splits = this_row - prev_row; const int32_t max_loop = 8; // `max_loop` is heuristically chosen. if (num_splits / threads_per_elem > max_loop) { if (thread_this_elem == 0) { FillValues(row_splits + prev_row + 1, num_splits, elem); } } else { // TODO(dan): figure out how to unroll this? for (; thread_this_elem < num_splits; thread_this_elem += threads_per_elem) row_splits[prev_row + 1 + thread_this_elem] = elem; } } // see declaration in utils.h for documentation. 
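// Worked example (added for illustration, not from the original k2 sources):
// given row_ids = [0, 0, 1, 3] with num_elems = 4 and num_rows = 4, the
// function below writes row_splits = [0, 2, 3, 3, 4].  Row 2 is empty, so
// row_splits[2] == row_splits[3], and the caller must pass
// no_empty_rows == false for an input like this one.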
void RowIdsToRowSplits(ContextPtr c, int32_t num_elems, const int32_t *row_ids, bool no_empty_rows, int32_t num_rows, int32_t *row_splits) { NVTX_RANGE(K2_FUNC); // process corner case first if (num_elems == 0) { auto lambda_set_values = [=] __host__ __device__(int32_t i) { row_splits[i] = 0; }; Eval(c, num_rows + 1, lambda_set_values); return; } DeviceType d = c->GetDeviceType(); if (d == kCpu) { int32_t cur_row = -1; for (int32_t i = 0; i < num_elems; i++) { int32_t row = row_ids[i]; K2_CHECK_GE(row, cur_row); while (cur_row < row) { cur_row++; row_splits[cur_row] = i; } } // cur_row must be >= 0 here as num_elems > 0 K2_CHECK_GE(cur_row, 0); while (cur_row < num_rows) { row_splits[++cur_row] = num_elems; } } else { K2_CHECK_EQ(d, kCuda); if (no_empty_rows) { auto lambda_simple = [=] __host__ __device__(int32_t i) { int32_t this_row = row_ids[i], prev_row; if (i > 0) { // (normal case) prev_row = row_ids[i - 1]; } else { // i == 0 row_splits[num_rows] = num_elems; prev_row = -1; } K2_CHECK_LE(this_row, prev_row + 1); // no_empty_rows was asserted by // the user if (this_row > prev_row) { row_splits[this_row] = i; } }; Eval(c, num_elems, lambda_simple); return; } else { // By doing "+ 2" instead of "+ 1" we increase the minimum number of // threads-per-row, which may reduce latency when there are successive // empty rows. Any value >= 1 is correct though. int32_t avg_rows_per_elem = num_rows / num_elems + 2, threads_per_elem = RoundUpToNearestPowerOfTwo(avg_rows_per_elem), tot_threads = (num_elems + 1) * threads_per_elem; // +1 for the last row int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(RowIdsToRowSplitsKernel<<<grid_size, block_size, 0, c->GetCudaStream()>>>( num_elems, threads_per_elem, row_ids, num_rows, row_splits)); } } } /* Called inside GetTaskRedirect(); see documentation of that in header. Each task with 0 <= task < num_tasks gets allocated `threads_per_job` threads, e.g. threads_per_job = 4 or 16. It's a kind of n-ary search (generalization of binary search) where each branch is handled by a different thread so they can happen in parallel. TODO(dan): there are a lot of opportunities to further optimize this using GPU hardware tricks. The thread-block size this is called with must be jobs_per_block * threads_per_job. */ /* template <int32_t jobs_per_block, int32_t threads_per_job> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { __shared__ int32_t temp[tasks_per_block]; // we do __syncwarp() for synchronization below; we require threads_per_job <= // 32 for this reason. static_assert(threads_per_job >= 2 && threads_per_job <= 32); // We have work to do for 0 <= job_idx < num_tasks, but be careful: job_idx // may be >= num_tasks if num_tasks is small or not a power of two (we don't // return because we need to do __syncwarp()). So we have to avoid out of // bounds memory access. int32_t job_idx = (blockIdx.x * blockDim.x + threadIdx.x) / threads_per_job; // `branch_idx` is which member we are of the group of the `threads_per_job` threads for this job. int32_t branch_idx = threadIdx.x % threads_per_job; // we assume blockDim.x % threads_per_job == 0 // `temp_idx` is which index in the temporary storage `temp` we are assigned // (one per job). int32_t temp_idx = threadIdx.x / threads_per_job; // TODO: we may at some point decide that row_splits[0] has to be zero. 
int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; if (num_items <= 0) { assert(num_items == 0); // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_job >= 2); if (branch_idx < 2 && job_idx < num_tasks) { TaskRedirect tr { job_idx, 2, branch_idx }; redirect_out[job_idx + branch_idx * num_tasks] = tr; } return; } else if (branch_idx == 0 && job_idx < num_tasks) { // This code writes to the jobs in the first half of the output array, // that are allocated to the same-numbered task. int32_t task_idx = job_idx, this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation); TaskRedirect tr { task_idx, num_jobs_this_task, 0 }; redirect_out[task_idx] = tr; } // Now we have the less-trivial task of assigning the jobs in the 2nd half of the // output array to tasks (these are allocated roughly proportional to the amount // of work to do for that task). // We do the selection by throwing darts at a dart-board, evenly spaced, and seeing which task they correspond // to. There are `num_tasks` darts). // Note: we know dart_location < row_splits_nt because job_idx < num_tasks and // because integer division rounds down. int32_t dart_separation = num_items / num_tasks, dart_location = row_splits0 + job_idx * dart_separation; // OK, from this point the goal is to find a task_idx such that // row_splits[task_idx] <= dart_location < row_splits[task_idx + 1]. // This is guaranteed to exist, as long as job_id < num_tasks. // As long as job_id < num_tasks, we maintain the property that // row_splits[lower_bound] <= dart_location && // (upper_bound > num_tasks || row_splits[upper_bound] > dart_location). // (where upper_bound == lower_bound + range), i.e. they are truly // lower and upper bounds int32_t lower_bound = 0, range = num_tasks; // we are responsible for items lower_bound through // (upper_bound = lower_bound + range) - 1. while (range > threads_per_job) { int32_t upper_bound = lower_bound + range; // We need to narrow the range of `task_idx` that might be the correct one. // We round *up* because we require that task_idx_step * threads_per_job >= // range, so that we cover the entire range. int32_t task_idx_step = (range + threads_per_job - 1) / threads_per_job, // >= 2 my_lower_task_idx = lower_bound + branch_idx * task_idx_step, my_upper_task_idx = my_lower_task_idx + task_idx_step; // The following avoids out-of-bounds memory accesses. if (my_upper_task_idx > upper_bound) my_upper_task_idx = upper_bound; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. if (my_lower_task_idx < num_tasks && row_splits[my_lower_task_idx] <= dart_location && dart_location < row_splits[my_upper_task_idx]) { // I am the "chosen branch" (exactly one will be chosen, as long as // job_idx < num_tasks). 
temp[temp_idx] = branch_idx; } __syncwarp(); int32_t chosen_branch_idx = temp[temp_idx]; lower_bound = lower_bound + chosen_branch_idx * task_idx_step; upper_bound = lower_bound + task_idx_step; range = task_idx_step; // note, we don't limit upper_bound to be <= num_tasks because we need all // threads in the block to go around the while loop the same number of // times. Therefore it's possible that upper_bound > num_tasks. K2_DASSERT(job_idx >= num_tasks || (row_splits[lower_bound] <= dart_location && (upper_bound > num_tasks || row_splits[upper_bound] > dart_location))); // TODO: remove once debugged. } int32_t task_idx = lower_bound + branch_idx; // TODO (dan): it may be possible to use one of those special within-warp // commands involving bitmaps to make the second comparison (dart_location < // row_splits[my_upper_task_idx]) unnecessary. // // The check `task_idx < num_tasks` is to avoid out-of-bounds access of row_splits. // The check `job_idx < num_tasks` is to avoid out-of-bounds access of `redirect_out`; // for these out-of-range job_idx values, it's possible for task_idx to have // any value since it may be uninitialized memory. if (task_idx < num_tasks && job_idx < num_tasks) { int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; if (this_row_split <= dart_location && dart_location < next_row_split) { // OK, exactly one branch per job will reach this point. `num_jobs` below // is the number of jobs that will be active for this task. (The "1 // +".. is the job that we assign for each task, one job per task, in the // "first half" of the jobs). The job_id_this_task we're working out // below is the job_id within the second half of the TaskRedirects, // the half that are allocated by throwing darts. int32_t num_jobs_this_task = 1 + (next_row_split/dart_separation - this_row_split/dart_separation), job_idx_this_task = 1 + (dart_location - this_row_split)/dart_separation; K2_CHECK(job_id_this_task < num_jobs_this_task); TaskRedirect tr { task_idx, num_jobs_this_task, job_idx_this_task }; redirect_out[num_tasks + job_idx] = tr; } } } */ /* This is a quite simple implementation of GetTaskRedirect... I had a more complicated one above that had better O(N) performance for hard cases, but this one will handle more normal/smaller cases better, plus is easier to debug. The basic idea is to throw lots of threads at it, i.e. threads_per_task should be, say, twice larger than the average / expected number of jobs per task, so that if a task has lots of jobs it doesn't have to loop too many times. */ template <int32_t threads_per_task> __global__ void GetTaskRedirect(int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { int32_t thread = blockIdx.x * blockDim.x + threadIdx.x; int32_t task_idx = thread / threads_per_task; if (task_idx >= num_tasks) return; // `thread_idx` is which member we are of the group of the `threads_per_job` // threads for this job. int32_t thread_idx = thread % threads_per_task; int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0; // the 'num_items' is the // total amount of work to // do, that we want to // distribute fairly evenly. // The idea with `dart_separation` is this: Half of the jobs we allocate to // the corresponding tasks. The other half we allocate by throwing darts onto // the interval [0, num_items - 1], evenly spaced starting from 0, and seeing // which tasks they land in. 
This is somewhat random but it ensures that if // any task has a very large amount of work to do, it will get a roughly // proportionate number of jobs. int32_t dart_separation = num_items / num_tasks; if (dart_separation <= 0) { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return static_assert(threads_per_task >= 2, "threads per task must >= 2"); if (thread_idx < 2) { TaskRedirect tr{task_idx, 2, static_cast<uint16_t>(thread_idx)}; redirect_out[task_idx + thread_idx * num_tasks] = tr; } return; } // TODO(dan): IDK how well the hardware combines these memory requests; could // consider loading to shared memory first. int32_t this_row_split = row_splits[task_idx], next_row_split = row_splits[task_idx + 1]; // `num_jobs` below is the number of jobs that will be active for // this task. (The "1 +".. is the job that we assign for each // task, one job per task, in the "first half" of the jobs). // the job_idx we're working out below is the job_idx for the // "second half" of int32_t num_jobs_this_task = 1 + (min(next_row_split / dart_separation, num_tasks) - min(this_row_split / dart_separation, num_tasks)); // function `min` is from cuda K2_CHECK_EQ(static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = thread_idx; job_id_this_task < num_jobs_this_task; job_id_this_task += threads_per_task) { int32_t job_idx = (job_id_this_task == 0 ? task_idx : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. redirect_out[job_idx] = TaskRedirect{task_idx, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } void GetTaskRedirect(cudaStream_t stream, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { NVTX_RANGE(K2_FUNC); if (num_tasks <= 0) return; if (stream == kCudaStreamInvalid) { // there's not much point in using this on CPU as there are better ways // to do things (sequentially), but this can be useful for debugging. // The idea with `dart_separation` is this: Half of the jobs we allocate // to the corresponding tasks. The other half we allocate by throwing // darts onto the interval [0, num_items - 1], evenly spaced starting from // 0, and seeing which tasks they land in. This is somewhat random but it // ensures that if any task has a very large amount of work to do, it will // get a roughly proportionate number of jobs. int32_t row_splits0 = row_splits[0], row_splits_nt = row_splits[num_tasks], num_items = row_splits_nt - row_splits0, dart_separation = num_items / num_tasks; if (dart_separation != 0) { for (int32_t task = 0; task < num_tasks; ++task) { int32_t this_row_split = row_splits[task], next_row_split = row_splits[task + 1]; int32_t num_jobs_this_task = 1 + (std::min(next_row_split / dart_separation, num_tasks) - std::min(this_row_split / dart_separation, num_tasks)); K2_CHECK_EQ( static_cast<int32_t>(static_cast<uint16_t>(num_jobs_this_task)), num_jobs_this_task); for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = (job_id_this_task == 0 ? task : // 1st half num_tasks + (this_row_split / dart_separation) + job_id_this_task - 1); // 2nd half. 
redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } } else { // This is a special case where there is no work to do; we give a trivial // assignment of tasks to jobs and return for (int32_t task = 0; task < num_tasks; ++task) { int32_t num_jobs_this_task = 2; for (int32_t job_id_this_task = 0; job_id_this_task < num_jobs_this_task; ++job_id_this_task) { int32_t job_idx = task + job_id_this_task * num_tasks; redirect_out[job_idx] = TaskRedirect{task, static_cast<uint16_t>(num_jobs_this_task), static_cast<uint16_t>(job_id_this_task)}; } } } } else { // compare 8 to 2, which is the expected number of jobs per task. having // 8 substantially greater than 2 gives a fairly big safety factor. // However this is still far from ideal in scenarios where the number of // tasks might be highly unbalanced. const int32_t threads_per_task = 8, tot_threads = threads_per_task * num_tasks; int32_t block_size = 256; int32_t grid_size = NumBlocks(tot_threads, block_size); K2_CUDA_SAFE_CALL(GetTaskRedirect<threads_per_task> <<<grid_size, block_size, 0, stream>>>( num_tasks, row_splits, redirect_out)); } } void GetTaskRedirect(ContextPtr &c, int32_t num_tasks, const int32_t *row_splits, TaskRedirect *redirect_out) { GetTaskRedirect(c->GetCudaStream(), num_tasks, row_splits, redirect_out); } } // namespace k2
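// A minimal CPU-only reference sketch (added for illustration; not part of k2
// and deliberately kept out of the build) of the row_splits <-> row_ids
// relationship that RowSplitsToRowIds() / RowIdsToRowSplits() above implement.
// The helper names are hypothetical.
#if 0
#include <cstdint>
#include <vector>

// e.g. [0, 2, 5, 5, 7] -> [0, 0, 1, 1, 1, 3, 3]
std::vector<int32_t> RowSplitsToRowIdsRef(
    const std::vector<int32_t> &row_splits) {
  std::vector<int32_t> row_ids;
  for (int32_t row = 0; row + 1 < static_cast<int32_t>(row_splits.size());
       ++row)
    for (int32_t e = row_splits[row]; e < row_splits[row + 1]; ++e)
      row_ids.push_back(row);
  return row_ids;
}

// e.g. [0, 0, 1, 1, 1, 3, 3] with num_rows == 4 -> [0, 2, 5, 5, 7]
std::vector<int32_t> RowIdsToRowSplitsRef(const std::vector<int32_t> &row_ids,
                                          int32_t num_rows) {
  std::vector<int32_t> row_splits(num_rows + 1, 0);
  for (int32_t id : row_ids) ++row_splits[id + 1];  // per-row counts
  for (int32_t r = 0; r < num_rows; ++r)            // exclusive prefix sum
    row_splits[r + 1] += row_splits[r];
  return row_splits;
}
#endif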
cf3129a1ae8994737ca9d148403ccfc9772afbc8.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdio> #include <layers/fused_relu_bias_fully_connected_layer.hpp> #include <linalg/reduce.cuh> #include <utils.cuh> #include <utils.hpp> #include "common.hpp" namespace HugeCTR { namespace { template <int BLOCK_WIDTH> __global__ void reverse_add_bias_and_re_kernel(float* bias, __half* dRelu, __half* middle, const __half* top, int ldn) { __shared__ __half2 elem[32][BLOCK_WIDTH + 1]; __shared__ __half2 accu[BLOCK_WIDTH]; const __half2 zero = TypeFunc<__half2>::zero(); __half2* middle2 = reinterpret_cast<__half2*>(middle); __half2* dRelu2 = reinterpret_cast<__half2*>(dRelu); const __half2* top2 = reinterpret_cast<const __half2*>(top); int lx, ly, gi; int gx_offset = blockIdx.x * BLOCK_WIDTH; int gy_offset = blockIdx.y * 32; for (int i = 0; i < BLOCK_WIDTH * 32; i += blockDim.x) { lx = threadIdx.x % BLOCK_WIDTH; ly = (i + threadIdx.x) / BLOCK_WIDTH; gi = (ly + gy_offset) * ldn + (lx + gx_offset); __half2 t = middle2[gi]; __half2 mask = __hgt2(t, zero); t = __hmul2(__ldg(top2 + gi), mask); dRelu2[gi] = t; elem[ly][lx] = t; } __syncthreads(); for (int i = 0; i < BLOCK_WIDTH * 32; i += blockDim.x) { lx = (i + threadIdx.x) / 32; ly = threadIdx.x % 32; __half2 val = warpReduceSum(elem[ly][lx]); if (ly == 0) { accu[lx] = val; } } __syncthreads(); if (threadIdx.x < BLOCK_WIDTH * 2) { __half2 val = accu[threadIdx.x / 2]; float fval = (threadIdx.x % 2 == 0) ? 
__low2float(val) : __high2float(val); atomicAdd(bias + gx_offset * 2 + threadIdx.x, fval); } } } // namespace FusedReluBiasFullyConnectedLayer::FusedReluBiasFullyConnectedLayer( const std::shared_ptr<BufferBlock2<float>>& master_weights_buff, const std::shared_ptr<BufferBlock2<__half>>& weights_buff, const std::shared_ptr<BufferBlock2<__half>>& weights_grad_buff, const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff, const Tensor2<__half>& train_in_tensor, const Tensor2<__half>& mask_in_tensor, const Tensor2<__half>& dRelu_in_tensor, const Tensor2<__half>& db_in_tensor, const Tensor2<__half>& train_out_tensor, const Tensor2<__half>& mask_out_tensor, const Tensor2<__half>& dRelu_out_tensor, Tensor2<__half>& db_out_tensor, const std::shared_ptr<GPUResource>& gpu_resource, const FcPosition_t& pos, const Activation_t& act, const bool& skip_dgrad, std::vector<Initializer_t> initializer_types) : Layer(gpu_resource, initializer_types), balgo_k_(CUBLAS_GEMM_DEFAULT_TENSOR_OP), balgo_x_(CUBLAS_GEMM_DEFAULT_TENSOR_OP), balgo_b_(CUBLAS_GEMM_DEFAULT_TENSOR_OP), pos_(pos), act_(act), skip_dgrad_(skip_dgrad) { const auto& bottom_tensor_dim = train_in_tensor.get_dimensions(); const auto& top_tensor_dim = train_out_tensor.get_dimensions(); if (bottom_tensor_dim.size() != 2 || top_tensor_dim.size() != 2) { CK_THROW_(Error_t::WrongInput, "input or output tensor doesn't has two dimensions"); } size_t m = bottom_tensor_dim[0]; size_t n = top_tensor_dim[1]; size_t k = bottom_tensor_dim[1]; std::vector<size_t> kernel_dim = {k, n}; std::vector<size_t> bias_dim = {1, n}; std::vector<size_t> identity_dim = {1, m}; { Tensor2<float> tensor; master_weights_buff->reserve(kernel_dim, &tensor); weights_.push_back(tensor); } { Tensor2<float> tensor; master_weights_buff->reserve(bias_dim, &tensor); weights_.push_back(tensor); } { Tensor2<__half> tensor; weights_buff->reserve(kernel_dim, &tensor); weights_half_.push_back(tensor); } { Tensor2<__half> tensor; weights_buff->reserve(bias_dim, &tensor); weights_half_.push_back(tensor); } { Tensor2<__half> tensor; weights_grad_buff->reserve(kernel_dim, &tensor); weights_grad_.push_back(tensor); } { Tensor2<__half> tensor; weights_grad_buff->reserve(bias_dim, &db_out_tensor); weights_grad_.push_back(db_out_tensor); } blobs_buff->reserve(identity_dim, &identity_tensor_); train_in_tensor_ = train_in_tensor; if (pos_ == FcPosition_t::Head || pos_ == FcPosition_t::Isolated) mask_in_tensor_ = train_in_tensor; else { mask_in_tensor_ = mask_in_tensor; dRelu_in_tensor_ = dRelu_in_tensor; db_in_tensor_ = db_in_tensor; } train_out_tensor_ = train_out_tensor; mask_out_tensor_ = mask_out_tensor; dRelu_out_tensor_ = dRelu_out_tensor; db_out_tensor_ = db_out_tensor; blobs_buff->reserve(kernel_dim, &bias_grad_tensor_); std::vector<size_t> mask_dim = {m, n}; blobs_buff->reserve(mask_dim, &mask_in_tensor_temp_); } void FusedReluBiasFullyConnectedLayer::initialize() { // TODO: We need different bottom desc based on is_train or not const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions(); const auto& top_tensor_dim = train_out_tensor_.get_dimensions(); __half* identity = identity_tensor_.get_ptr(); int m = bottom_tensor_dim[0]; int n = top_tensor_dim[1]; int k = bottom_tensor_dim[1]; hipLaunchKernelGGL(( initialize_array), dim3((m - 1) / 1024 + 1), dim3(1024), 0, get_gpu().get_stream(), identity, m, __float2half(1.0f)); CK_CUBLAS_THROW_(cublasLtMatmulDescCreate(&cublas_op_desc_, CUBLAS_COMPUTE_32F, HIP_R_32F)); hipblasOperation_t trans = HIPBLAS_OP_N; 
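  // With the layouts created below (kernel: n x k, bottom: k x m, top: n x m,
  // all column-major) the fprop call computes top = kernel * bottom, with the
  // bias add and, unless act_ == Activation_t::None, the ReLU and its mask
  // write fused in via the cuBLASLt epilogue configured here.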
CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_TRANSA, &trans, sizeof(trans))); CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_TRANSB, &trans, sizeof(trans))); cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_RELU_AUX_BIAS; if (act_ == Activation_t::None) epi = CUBLASLT_EPILOGUE_BIAS; CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_EPILOGUE, &epi, sizeof(epi))); const __half* bias = weights_half_[1].get_ptr(); CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute( cublas_op_desc_, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bias, sizeof(bias))); if (act_ != Activation_t::None) { __half* reluMask = mask_out_tensor_.get_ptr(); cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER, &reluMask, sizeof(reluMask)); long reluMaskLd = n; cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD, &reluMaskLd, sizeof(reluMaskLd)); } CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_kernel_desc_, HIP_R_16F, n, k, n)); CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_bottom_desc_, HIP_R_16F, k, m, k)); CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_top_desc_, HIP_R_16F, n, m, n)); CK_CUBLAS_THROW_(cublasLtMatmulPreferenceCreate(&cublas_preference_)); cublaslt_workspace_size_ = 1024 * 1024 * 16; // Set it to 8MB for now CK_CUDA_THROW_(hipMalloc(&cublaslt_workspace_, cublaslt_workspace_size_)); CK_CUBLAS_THROW_(cublasLtMatmulPreferenceSetAttribute( cublas_preference_, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &cublaslt_workspace_size_, sizeof(cublaslt_workspace_size_))); uint32_t pointer_mode = CUBLASLT_POINTER_MODE_MASK_HOST; CK_CUBLAS_THROW_(cublasLtMatmulPreferenceSetAttribute(cublas_preference_, CUBLASLT_MATMUL_PREF_POINTER_MODE_MASK, &pointer_mode, sizeof(pointer_mode))); // By default set algo to best estimated heurstic cublasLtMatmulHeuristicResult_t heuristic_result; int returned_res = 0; CK_CUBLAS_THROW_(cublasLtMatmulAlgoGetHeuristic( get_gpu().get_cublaslt_handle(), cublas_op_desc_, cublas_kernel_desc_, cublas_bottom_desc_, cublas_top_desc_, cublas_top_desc_, cublas_preference_, 1, &heuristic_result, &returned_res)); memcpy(&falgo_k_, &heuristic_result.algo, sizeof(falgo_k_)); if (returned_res == 0) { CK_CUBLAS_THROW_(HIPBLAS_STATUS_NOT_SUPPORTED); } initialize_bprop(); } void FusedReluBiasFullyConnectedLayer::initialize_bprop() { // TODO: We need different bottom desc based on is_train or not const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions(); const auto& top_tensor_dim = train_out_tensor_.get_dimensions(); size_t m = bottom_tensor_dim[0]; size_t n = top_tensor_dim[1]; size_t k = bottom_tensor_dim[1]; CK_CUBLAS_THROW_( cublasLtMatmulDescCreate(&cublas_op_desc_bprop_, CUBLAS_COMPUTE_32F, HIP_R_32F)); hipblasOperation_t transA = HIPBLAS_OP_T; hipblasOperation_t transB = HIPBLAS_OP_N; CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute( cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_TRANSA, &transA, sizeof(transA))); CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute( cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_TRANSB, &transB, sizeof(transB))); if (pos_ == FcPosition_t::Head || pos_ == FcPosition_t::Isolated) { cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_DEFAULT; CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute( cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE, &epi, sizeof(epi))); } else if (pos_ == FcPosition_t::Body || pos_ == FcPosition_t::Tail) { cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_DRELU_BGRAD; 
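    // For Body/Tail positions the dgrad matmul fuses two extra steps through
    // this epilogue: it applies the ReLU mask stored during fprop while
    // computing the data gradient, and it reduces the bias gradient into
    // db_in_tensor_ (the bgrad pointer set just below), so no separate
    // kernels are needed for either step.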
cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE, &epi, sizeof(epi)); __half* bgrad = db_in_tensor_.get_ptr(); cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bgrad, sizeof(bgrad)); __half* reluMask = mask_in_tensor_.get_ptr(); cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER, &reluMask, sizeof(reluMask)); long reluMaskLd = k; cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD, &reluMaskLd, sizeof(reluMaskLd)); } CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_dRelu_top_desc_, HIP_R_16F, n, m, n)); CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_dRelu_bottom_desc_, HIP_R_16F, k, m, k)); CK_CUBLAS_THROW_(cublasLtMatmulPreferenceCreate(&cublas_preference_dRelu_)); cublaslt_workspace_size_ = 1024 * 1024 * 8; // Set it to 8MB for now CK_CUDA_THROW_(hipMalloc(&cublaslt_workspace_dRelu_, cublaslt_workspace_size_)); CK_CUBLAS_THROW_(cublasLtMatmulPreferenceSetAttribute( cublas_preference_dRelu_, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &cublaslt_workspace_size_, sizeof(cublaslt_workspace_size_))); uint32_t pointer_mode = CUBLASLT_POINTER_MODE_MASK_HOST; CK_CUBLAS_THROW_(cublasLtMatmulPreferenceSetAttribute(cublas_preference_, CUBLASLT_MATMUL_PREF_POINTER_MODE_MASK, &pointer_mode, sizeof(pointer_mode))); // By default set algo to best estimated heurstic cublasLtMatmulHeuristicResult_t heuristic_result; int returned_res = 0; CK_CUBLAS_THROW_(cublasLtMatmulAlgoGetHeuristic( get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, cublas_kernel_desc_, cublas_dRelu_top_desc_, cublas_dRelu_bottom_desc_, cublas_dRelu_bottom_desc_, cublas_preference_dRelu_, 1, &heuristic_result, &returned_res)); memcpy(&balgo_dRelu_, &heuristic_result.algo, sizeof(balgo_dRelu_)); if (returned_res == 0) { CK_CUBLAS_THROW_(HIPBLAS_STATUS_NOT_SUPPORTED); } } void FusedReluBiasFullyConnectedLayer::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.start", get_gpu().get_stream()); const __half* kernel = weights_half_[0].get_ptr(); const __half* bias = weights_half_[1].get_ptr(); const __half* bottom = get_bottom_tensor_fprop(is_train).get_ptr(); __half* top_fprop = train_out_tensor_.get_ptr(); __half* mask_out = mask_out_tensor_.get_ptr(); const auto& bottom_tensor_dim = get_bottom_tensor_fprop(is_train).get_dimensions(); const auto& top_tensor_dim = train_out_tensor_.get_dimensions(); size_t m = bottom_tensor_dim[0]; size_t n = top_tensor_dim[1]; size_t k = bottom_tensor_dim[1]; const float alpha = 1.0f; const float beta = 0.0f; PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.cublasLtMatmul.start", get_gpu().get_stream()); CK_CUBLAS_THROW_(cublasLtMatmul( get_gpu().get_cublaslt_handle(), cublas_op_desc_, &alpha, kernel, cublas_kernel_desc_, bottom, cublas_bottom_desc_, &beta, top_fprop, cublas_top_desc_, top_fprop, cublas_top_desc_, &falgo_k_, cublaslt_workspace_, cublaslt_workspace_size_, get_gpu().get_stream())); PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.cublasLtMatmul.stop", get_gpu().get_stream()); if ((pos_ == FcPosition_t::Tail || pos_ == FcPosition_t::Isolated) && act_ != Activation_t::None) { size_t len = train_out_tensor_.get_num_elements(); CK_CUDA_THROW_(hipMemcpyAsync(mask_out, top_fprop, len * sizeof(__half), hipMemcpyDeviceToDevice, get_gpu().get_stream())); } PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.stop", get_gpu().get_stream()); #ifndef 
NDEBUG hipDeviceSynchronize(); CK_CUDA_THROW_(hipGetLastError()); #endif } void FusedReluBiasFullyConnectedLayer::bprop() { CudaDeviceContext context(get_device_id()); PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.start", get_gpu().get_stream()); const __half* kernel = weights_half_[0].get_ptr(); __half* mask_in = mask_in_tensor_.get_ptr(); const __half* train_out = train_out_tensor_.get_ptr(); __half* mask_out = mask_out_tensor_.get_ptr(); __half* kernel_grad = weights_grad_[0].get_ptr(); __half* bias_grad = weights_grad_[1].get_ptr(); const __half* bottom = get_bottom_tensor_fprop(true).get_ptr(); __half* bottom_bprop = get_bottom_tensor_bprop(true).get_ptr(); float* bias_grad_float = bias_grad_tensor_.get_ptr(); __half* dRelu_top = dRelu_out_tensor_.get_ptr(); const __half* identity = identity_tensor_.get_ptr(); const auto& bottom_tensor_dim = get_bottom_tensor_bprop(true).get_dimensions(); const auto& top_tensor_dim = train_out_tensor_.get_dimensions(); int m = bottom_tensor_dim[0]; int n = top_tensor_dim[1]; int k = bottom_tensor_dim[1]; const float alpha = 1.0f; const float beta_k = 1.0f; const float beta_x = 0.0f; const float beta_b = 0.0f; PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.reverse_add_bias_and_re_kernel.start", get_gpu().get_stream()); if (pos_ == FcPosition_t::Tail || pos_ == FcPosition_t::Isolated) { if (act_ == Activation_t::None) { CK_CUBLAS_THROW_(hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_N, n, 1, m, &alpha, train_out, HIP_R_16F, n, identity, HIP_R_16F, m, &beta_b, bias_grad, HIP_R_16F, n, HIP_R_32F, balgo_b_)); } else { hipLaunchKernelGGL(( initialize_array), dim3((n - 1) / 1024 + 1), dim3(1024), 0, get_gpu().get_stream(), bias_grad_float, n, 0.0f); dim3 blocks(n / 64, m / 32); hipLaunchKernelGGL(( reverse_add_bias_and_re_kernel<32>), dim3(blocks), dim3(512), 0, get_gpu().get_stream(), bias_grad_float, dRelu_top, mask_out, train_out, n / 2); hipLaunchKernelGGL(( convert_array), dim3((n - 1) / 1024 + 1), dim3(1024), 0, get_gpu().get_stream(), bias_grad, bias_grad_float, n); } } PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.reverse_add_bias_and_re_kernel.stop", get_gpu().get_stream()); if (act_ == Activation_t::None) { dRelu_top = train_out_tensor_.get_ptr(); } PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_1.start", get_gpu().get_stream()); CK_CUBLAS_THROW_(hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_T, n, k, m, &alpha, dRelu_top, HIP_R_16F, n, bottom, HIP_R_16F, k, &beta_k, kernel_grad, HIP_R_16F, n, HIP_R_32F, balgo_k_)); PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_1.stop", get_gpu().get_stream()); if (skip_dgrad_) { PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.stop", get_gpu().get_stream()); return; } if (pos_ == FcPosition_t::Body || pos_ == FcPosition_t::Tail) { bottom_bprop = dRelu_in_tensor_.get_ptr(); } PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_2.start", get_gpu().get_stream()); CK_CUBLAS_THROW_(cublasLtMatmul( get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, &alpha, kernel, cublas_kernel_desc_, dRelu_top, cublas_dRelu_top_desc_, &beta_x, bottom_bprop, cublas_dRelu_bottom_desc_, bottom_bprop, cublas_dRelu_bottom_desc_, &balgo_dRelu_, cublaslt_workspace_dRelu_, cublaslt_workspace_size_, get_gpu().get_stream())); PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_2.stop", get_gpu().get_stream()); PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.stop", get_gpu().get_stream()); 
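  // Everything above was enqueued asynchronously on this layer's stream; the
  // device synchronization below is compiled in for debug builds only.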
#ifndef NDEBUG hipDeviceSynchronize(); CK_CUDA_THROW_(hipGetLastError()); #endif } void FusedReluBiasFullyConnectedLayer::search_algorithm() { // Set to the CUDA device where this layer assigned to CudaDeviceContext context(get_device_id()); const size_t repeat_num = 100; const int max_algo_count = 16; // Device Tensors to be used __half* bottom = get_bottom_tensor_fprop(true).get_ptr(); __half* top = train_out_tensor_.get_ptr(); __half* kernel = weights_half_[0].get_ptr(); __half* bias = weights_half_[1].get_ptr(); __half* kernel_grad = weights_grad_[0].get_ptr(); __half* bias_grad = weights_grad_[1].get_ptr(); __half* identity = identity_tensor_.get_ptr(); // Tensor dim const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions(); const auto& top_tensor_dim = train_out_tensor_.get_dimensions(); int m = bottom_tensor_dim[0]; int n = top_tensor_dim[1]; int k = bottom_tensor_dim[1]; // Record time for each algorithm float shortestTime = std::numeric_limits<float>::max(); float time; hipEvent_t start, stop; CK_CUDA_THROW_(hipEventCreate(&start)); CK_CUDA_THROW_(hipEventCreate(&stop)); cublasLtMatmulHeuristicResult_t heuristic_result[max_algo_count] = {0}; int algo_count = 0; CK_CUBLAS_THROW_(cublasLtMatmulAlgoGetHeuristic( get_gpu().get_cublaslt_handle(), cublas_op_desc_, cublas_kernel_desc_, cublas_bottom_desc_, cublas_top_desc_, cublas_top_desc_, cublas_preference_, max_algo_count, heuristic_result, &algo_count)); if (algo_count == 0) { CK_CUBLAS_THROW_(HIPBLAS_STATUS_NOT_SUPPORTED); } // if(get_device_id()==0) printf("M: %d, N: %d, K: %d\n", m, n, k); for (int algoIdx = 0; algoIdx < algo_count; algoIdx++) { hipblasStatus_t status = HIPBLAS_STATUS_SUCCESS; const float alpha = 1.0f; const float beta = 0.0f; CK_CUDA_THROW_(hipEventRecord(start, get_gpu().get_stream())); for (size_t i = 0; i < repeat_num && status == HIPBLAS_STATUS_SUCCESS; ++i) { status = cublasLtMatmul(get_gpu().get_cublaslt_handle(), cublas_op_desc_, &alpha, kernel, cublas_kernel_desc_, bottom, cublas_bottom_desc_, &beta, top, cublas_top_desc_, top, cublas_top_desc_, &heuristic_result[algoIdx].algo, cublaslt_workspace_, cublaslt_workspace_size_, get_gpu().get_stream()); } CK_CUDA_THROW_(hipEventRecord(stop, get_gpu().get_stream())); CK_CUDA_THROW_(hipEventSynchronize(stop)); CK_CUDA_THROW_(hipEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != HIPBLAS_STATUS_SUCCESS) { // printf("The algorithms %d is not supported for fprop, skipped.\n", testAlgo); continue; } // if(get_device_id()==0) printf("Algo: %d, wavesCount: %f, time: %f\n", // (int)heuristic_result[algoIdx].algo, // heuristic_result[algoIdx].wavesCount, // time); // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; memcpy(&falgo_k_, &heuristic_result[algoIdx].algo, sizeof(falgo_k_)); // if(get_device_id()==0) printf("Picked algorithm: %d", heuristic_result[algoIdx].algo); } } // dRelu in backward pass // Reset shortestTime shortestTime = std::numeric_limits<float>::max(); cublasLtMatmulHeuristicResult_t heuristic_result_dRelu[max_algo_count] = {0}; int algo_count_dRelu = 0; CK_CUBLAS_THROW_(cublasLtMatmulAlgoGetHeuristic( get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, cublas_kernel_desc_, cublas_dRelu_top_desc_, cublas_dRelu_bottom_desc_, cublas_dRelu_bottom_desc_, cublas_preference_dRelu_, max_algo_count, heuristic_result_dRelu, &algo_count_dRelu)); if 
(algo_count_dRelu == 0) { CK_CUBLAS_THROW_(HIPBLAS_STATUS_NOT_SUPPORTED); } for (int algoIdx = 0; algoIdx < algo_count_dRelu; algoIdx++) { hipblasStatus_t status = HIPBLAS_STATUS_SUCCESS; const float alpha = 1.0f; const float beta = 0.0f; CK_CUDA_THROW_(hipEventRecord(start, get_gpu().get_stream())); for (size_t i = 0; i < repeat_num && status == HIPBLAS_STATUS_SUCCESS; ++i) { status = cublasLtMatmul(get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, &alpha, kernel, cublas_kernel_desc_, top, cublas_dRelu_top_desc_, &beta, bottom, cublas_dRelu_bottom_desc_, bottom, cublas_dRelu_bottom_desc_, &heuristic_result_dRelu[algoIdx].algo, cublaslt_workspace_dRelu_, cublaslt_workspace_size_, get_gpu().get_stream()); } CK_CUDA_THROW_(hipEventRecord(stop, get_gpu().get_stream())); CK_CUDA_THROW_(hipEventSynchronize(stop)); CK_CUDA_THROW_(hipEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != HIPBLAS_STATUS_SUCCESS) { // printf("The algorithms %d is not supported for fprop, skipped.\n", testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; memcpy(&balgo_dRelu_, &heuristic_result_dRelu[algoIdx].algo, sizeof(balgo_dRelu_)); } } // Reset shortestTime shortestTime = std::numeric_limits<float>::max(); // Start, end for search const hipblasGemmAlgo_t startAlgo = CUBLAS_GEMM_DEFAULT_TENSOR_OP; const hipblasGemmAlgo_t endAlgo = CUBLAS_GEMM_ALGO15_TENSOR_OP; // Search all the algorithm for balgo_k_ for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) { hipblasStatus_t status = HIPBLAS_STATUS_SUCCESS; const float alpha = 1.0f; const float beta = 1.0f; // Record start event CK_CUDA_THROW_(hipEventRecord(start, get_gpu().get_stream())); for (size_t i = 0; i < repeat_num && status == HIPBLAS_STATUS_SUCCESS; ++i) { status = hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_T, n, k, m, &alpha, top, HIP_R_16F, n, bottom, HIP_R_16F, k, &beta, kernel_grad, HIP_R_16F, n, HIP_R_32F, static_cast<hipblasGemmAlgo_t>(testAlgo)); } CK_CUDA_THROW_(hipEventRecord(stop, get_gpu().get_stream())); CK_CUDA_THROW_(hipEventSynchronize(stop)); CK_CUDA_THROW_(hipEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != HIPBLAS_STATUS_SUCCESS) { // printf("The algorithms %d is not supported for bprop_W, skipped.\n", testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; balgo_k_ = static_cast<hipblasGemmAlgo_t>(testAlgo); } } // Reset shortestTime shortestTime = std::numeric_limits<float>::max(); // Search all the algorithm for balgo_b_ for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) { hipblasStatus_t status = HIPBLAS_STATUS_SUCCESS; const float alpha = 1.0f; const float beta = 0.0f; // Record start event CK_CUDA_THROW_(hipEventRecord(start, get_gpu().get_stream())); for (size_t i = 0; i < repeat_num && status == HIPBLAS_STATUS_SUCCESS; ++i) { status = hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_N, HIPBLAS_OP_N, n, 1, m, &alpha, top, HIP_R_16F, n, identity, HIP_R_16F, m, &beta, bias_grad, HIP_R_16F, n, HIP_R_32F, static_cast<hipblasGemmAlgo_t>(testAlgo)); } CK_CUDA_THROW_(hipEventRecord(stop, get_gpu().get_stream())); CK_CUDA_THROW_(hipEventSynchronize(stop)); 
CK_CUDA_THROW_(hipEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != HIPBLAS_STATUS_SUCCESS) { // printf("The algorithms %d is not supported for bprop_W, skipped.\n", testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; balgo_b_ = static_cast<hipblasGemmAlgo_t>(testAlgo); } } // Reset shortestTime shortestTime = std::numeric_limits<float>::max(); // Search all the algorithm for balgo_x_ for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) { hipblasStatus_t status = HIPBLAS_STATUS_SUCCESS; const __half alpha = 1.0f; const __half beta = 0.0f; // Record start event CK_CUDA_THROW_(hipEventRecord(start, get_gpu().get_stream())); for (size_t i = 0; i < repeat_num && status == HIPBLAS_STATUS_SUCCESS; ++i) { status = hipblasGemmEx(get_gpu().get_cublas_handle(), HIPBLAS_OP_T, HIPBLAS_OP_N, k, m, n, &alpha, kernel, HIP_R_16F, n, top, HIP_R_16F, n, &beta, bottom, HIP_R_16F, k, HIP_R_32F, static_cast<hipblasGemmAlgo_t>(testAlgo)); } CK_CUDA_THROW_(hipEventRecord(stop, get_gpu().get_stream())); CK_CUDA_THROW_(hipEventSynchronize(stop)); CK_CUDA_THROW_(hipEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != HIPBLAS_STATUS_SUCCESS) { // printf("The algorithms %d is not supported for bprop_Xn, skipped.\n", testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; balgo_x_ = static_cast<hipblasGemmAlgo_t>(testAlgo); } } // Print selection information // printf("The algorithm selection for falgo_k_, balgo_k_, balgo_x_ are: %d, %d and %d.\n", // (int)falgo_k_ - CUBLAS_GEMM_DEFAULT_TENSOR_OP, // (int)balgo_k_ - CUBLAS_GEMM_DEFAULT_TENSOR_OP, // (int)balgo_x_ - CUBLAS_GEMM_DEFAULT_TENSOR_OP); // Output msg // MESSAGE_("The fully-connected layer has finished choosing the algorithm for cublas Gemm."); // Clean-up CK_CUDA_THROW_(hipEventDestroy(start)); CK_CUDA_THROW_(hipEventDestroy(stop)); } // namespace HugeCTR std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_uniform_initializer( const int index) { size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1]; size_t top_dim = train_out_tensor_.get_dimensions()[1]; float limit = 1.0f / ((0 == index ? bottom_dim : 0) + top_dim); return std::make_unique<UniformDataSimulator>(-1 * limit, limit); } std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_xavier_uniform_initializer( const int index) { size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1]; size_t top_dim = train_out_tensor_.get_dimensions()[1]; return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Uniform, 0 == index ? bottom_dim : 0, top_dim); } std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_xavier_norm_initializer( const int index) { size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1]; size_t top_dim = train_out_tensor_.get_dimensions()[1]; return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Norm, 0 == index ? 
bottom_dim : 0, top_dim); } std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_default_initializer( const int index) { size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1]; size_t top_dim = train_out_tensor_.get_dimensions()[1]; std::unique_ptr<DataSimulator> simu(nullptr); if (0 == index) { simu.reset(new VarianceScalingSimulator(1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Norm, bottom_dim, top_dim)); } else if (1 == index) { float stddev = sqrt(1.f / top_dim); simu.reset(new GaussianDataSimulator(0, stddev, -2 * stddev, 2 * stddev)); } else { CK_THROW_(Error_t::OutOfBound, "index != {0, 1}."); } return simu; } } // namespace HugeCTR
cf3129a1ae8994737ca9d148403ccfc9772afbc8.cu
/* * Copyright (c) 2020, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <cstdio> #include <layers/fused_relu_bias_fully_connected_layer.hpp> #include <linalg/reduce.cuh> #include <utils.cuh> #include <utils.hpp> #include "common.hpp" namespace HugeCTR { namespace { template <int BLOCK_WIDTH> __global__ void reverse_add_bias_and_re_kernel(float* bias, __half* dRelu, __half* middle, const __half* top, int ldn) { __shared__ __half2 elem[32][BLOCK_WIDTH + 1]; __shared__ __half2 accu[BLOCK_WIDTH]; const __half2 zero = TypeFunc<__half2>::zero(); __half2* middle2 = reinterpret_cast<__half2*>(middle); __half2* dRelu2 = reinterpret_cast<__half2*>(dRelu); const __half2* top2 = reinterpret_cast<const __half2*>(top); int lx, ly, gi; int gx_offset = blockIdx.x * BLOCK_WIDTH; int gy_offset = blockIdx.y * 32; for (int i = 0; i < BLOCK_WIDTH * 32; i += blockDim.x) { lx = threadIdx.x % BLOCK_WIDTH; ly = (i + threadIdx.x) / BLOCK_WIDTH; gi = (ly + gy_offset) * ldn + (lx + gx_offset); __half2 t = middle2[gi]; __half2 mask = __hgt2(t, zero); t = __hmul2(__ldg(top2 + gi), mask); dRelu2[gi] = t; elem[ly][lx] = t; } __syncthreads(); for (int i = 0; i < BLOCK_WIDTH * 32; i += blockDim.x) { lx = (i + threadIdx.x) / 32; ly = threadIdx.x % 32; __half2 val = warpReduceSum(elem[ly][lx]); if (ly == 0) { accu[lx] = val; } } __syncthreads(); if (threadIdx.x < BLOCK_WIDTH * 2) { __half2 val = accu[threadIdx.x / 2]; float fval = (threadIdx.x % 2 == 0) ? 
__low2float(val) : __high2float(val); atomicAdd(bias + gx_offset * 2 + threadIdx.x, fval); } } } // namespace FusedReluBiasFullyConnectedLayer::FusedReluBiasFullyConnectedLayer( const std::shared_ptr<BufferBlock2<float>>& master_weights_buff, const std::shared_ptr<BufferBlock2<__half>>& weights_buff, const std::shared_ptr<BufferBlock2<__half>>& weights_grad_buff, const std::shared_ptr<GeneralBuffer2<CudaAllocator>>& blobs_buff, const Tensor2<__half>& train_in_tensor, const Tensor2<__half>& mask_in_tensor, const Tensor2<__half>& dRelu_in_tensor, const Tensor2<__half>& db_in_tensor, const Tensor2<__half>& train_out_tensor, const Tensor2<__half>& mask_out_tensor, const Tensor2<__half>& dRelu_out_tensor, Tensor2<__half>& db_out_tensor, const std::shared_ptr<GPUResource>& gpu_resource, const FcPosition_t& pos, const Activation_t& act, const bool& skip_dgrad, std::vector<Initializer_t> initializer_types) : Layer(gpu_resource, initializer_types), balgo_k_(CUBLAS_GEMM_DEFAULT_TENSOR_OP), balgo_x_(CUBLAS_GEMM_DEFAULT_TENSOR_OP), balgo_b_(CUBLAS_GEMM_DEFAULT_TENSOR_OP), pos_(pos), act_(act), skip_dgrad_(skip_dgrad) { const auto& bottom_tensor_dim = train_in_tensor.get_dimensions(); const auto& top_tensor_dim = train_out_tensor.get_dimensions(); if (bottom_tensor_dim.size() != 2 || top_tensor_dim.size() != 2) { CK_THROW_(Error_t::WrongInput, "input or output tensor doesn't has two dimensions"); } size_t m = bottom_tensor_dim[0]; size_t n = top_tensor_dim[1]; size_t k = bottom_tensor_dim[1]; std::vector<size_t> kernel_dim = {k, n}; std::vector<size_t> bias_dim = {1, n}; std::vector<size_t> identity_dim = {1, m}; { Tensor2<float> tensor; master_weights_buff->reserve(kernel_dim, &tensor); weights_.push_back(tensor); } { Tensor2<float> tensor; master_weights_buff->reserve(bias_dim, &tensor); weights_.push_back(tensor); } { Tensor2<__half> tensor; weights_buff->reserve(kernel_dim, &tensor); weights_half_.push_back(tensor); } { Tensor2<__half> tensor; weights_buff->reserve(bias_dim, &tensor); weights_half_.push_back(tensor); } { Tensor2<__half> tensor; weights_grad_buff->reserve(kernel_dim, &tensor); weights_grad_.push_back(tensor); } { Tensor2<__half> tensor; weights_grad_buff->reserve(bias_dim, &db_out_tensor); weights_grad_.push_back(db_out_tensor); } blobs_buff->reserve(identity_dim, &identity_tensor_); train_in_tensor_ = train_in_tensor; if (pos_ == FcPosition_t::Head || pos_ == FcPosition_t::Isolated) mask_in_tensor_ = train_in_tensor; else { mask_in_tensor_ = mask_in_tensor; dRelu_in_tensor_ = dRelu_in_tensor; db_in_tensor_ = db_in_tensor; } train_out_tensor_ = train_out_tensor; mask_out_tensor_ = mask_out_tensor; dRelu_out_tensor_ = dRelu_out_tensor; db_out_tensor_ = db_out_tensor; blobs_buff->reserve(kernel_dim, &bias_grad_tensor_); std::vector<size_t> mask_dim = {m, n}; blobs_buff->reserve(mask_dim, &mask_in_tensor_temp_); } void FusedReluBiasFullyConnectedLayer::initialize() { // TODO: We need different bottom desc based on is_train or not const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions(); const auto& top_tensor_dim = train_out_tensor_.get_dimensions(); __half* identity = identity_tensor_.get_ptr(); int m = bottom_tensor_dim[0]; int n = top_tensor_dim[1]; int k = bottom_tensor_dim[1]; initialize_array<<<(m - 1) / 1024 + 1, 1024, 0, get_gpu().get_stream()>>>(identity, m, __float2half(1.0f)); CK_CUBLAS_THROW_(cublasLtMatmulDescCreate(&cublas_op_desc_, CUBLAS_COMPUTE_32F, CUDA_R_32F)); cublasOperation_t trans = CUBLAS_OP_N; 
CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_TRANSA, &trans, sizeof(trans))); CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_TRANSB, &trans, sizeof(trans))); cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_RELU_AUX_BIAS; if (act_ == Activation_t::None) epi = CUBLASLT_EPILOGUE_BIAS; CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_EPILOGUE, &epi, sizeof(epi))); const __half* bias = weights_half_[1].get_ptr(); CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute( cublas_op_desc_, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bias, sizeof(bias))); if (act_ != Activation_t::None) { __half* reluMask = mask_out_tensor_.get_ptr(); cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER, &reluMask, sizeof(reluMask)); long reluMaskLd = n; cublasLtMatmulDescSetAttribute(cublas_op_desc_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD, &reluMaskLd, sizeof(reluMaskLd)); } CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_kernel_desc_, CUDA_R_16F, n, k, n)); CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_bottom_desc_, CUDA_R_16F, k, m, k)); CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_top_desc_, CUDA_R_16F, n, m, n)); CK_CUBLAS_THROW_(cublasLtMatmulPreferenceCreate(&cublas_preference_)); cublaslt_workspace_size_ = 1024 * 1024 * 16; // Set it to 8MB for now CK_CUDA_THROW_(cudaMalloc(&cublaslt_workspace_, cublaslt_workspace_size_)); CK_CUBLAS_THROW_(cublasLtMatmulPreferenceSetAttribute( cublas_preference_, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &cublaslt_workspace_size_, sizeof(cublaslt_workspace_size_))); uint32_t pointer_mode = CUBLASLT_POINTER_MODE_MASK_HOST; CK_CUBLAS_THROW_(cublasLtMatmulPreferenceSetAttribute(cublas_preference_, CUBLASLT_MATMUL_PREF_POINTER_MODE_MASK, &pointer_mode, sizeof(pointer_mode))); // By default set algo to best estimated heurstic cublasLtMatmulHeuristicResult_t heuristic_result; int returned_res = 0; CK_CUBLAS_THROW_(cublasLtMatmulAlgoGetHeuristic( get_gpu().get_cublaslt_handle(), cublas_op_desc_, cublas_kernel_desc_, cublas_bottom_desc_, cublas_top_desc_, cublas_top_desc_, cublas_preference_, 1, &heuristic_result, &returned_res)); memcpy(&falgo_k_, &heuristic_result.algo, sizeof(falgo_k_)); if (returned_res == 0) { CK_CUBLAS_THROW_(CUBLAS_STATUS_NOT_SUPPORTED); } initialize_bprop(); } void FusedReluBiasFullyConnectedLayer::initialize_bprop() { // TODO: We need different bottom desc based on is_train or not const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions(); const auto& top_tensor_dim = train_out_tensor_.get_dimensions(); size_t m = bottom_tensor_dim[0]; size_t n = top_tensor_dim[1]; size_t k = bottom_tensor_dim[1]; CK_CUBLAS_THROW_( cublasLtMatmulDescCreate(&cublas_op_desc_bprop_, CUBLAS_COMPUTE_32F, CUDA_R_32F)); cublasOperation_t transA = CUBLAS_OP_T; cublasOperation_t transB = CUBLAS_OP_N; CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute( cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_TRANSA, &transA, sizeof(transA))); CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute( cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_TRANSB, &transB, sizeof(transB))); if (pos_ == FcPosition_t::Head || pos_ == FcPosition_t::Isolated) { cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_DEFAULT; CK_CUBLAS_THROW_(cublasLtMatmulDescSetAttribute( cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE, &epi, sizeof(epi))); } else if (pos_ == FcPosition_t::Body || pos_ == FcPosition_t::Tail) { cublasLtEpilogue_t epi = CUBLASLT_EPILOGUE_DRELU_BGRAD; 
cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE, &epi, sizeof(epi)); __half* bgrad = db_in_tensor_.get_ptr(); cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_BIAS_POINTER, &bgrad, sizeof(bgrad)); __half* reluMask = mask_in_tensor_.get_ptr(); cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_POINTER, &reluMask, sizeof(reluMask)); long reluMaskLd = k; cublasLtMatmulDescSetAttribute(cublas_op_desc_bprop_, CUBLASLT_MATMUL_DESC_EPILOGUE_AUX_LD, &reluMaskLd, sizeof(reluMaskLd)); } CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_dRelu_top_desc_, CUDA_R_16F, n, m, n)); CK_CUBLAS_THROW_(cublasLtMatrixLayoutCreate(&cublas_dRelu_bottom_desc_, CUDA_R_16F, k, m, k)); CK_CUBLAS_THROW_(cublasLtMatmulPreferenceCreate(&cublas_preference_dRelu_)); cublaslt_workspace_size_ = 1024 * 1024 * 8; // Set it to 8MB for now CK_CUDA_THROW_(cudaMalloc(&cublaslt_workspace_dRelu_, cublaslt_workspace_size_)); CK_CUBLAS_THROW_(cublasLtMatmulPreferenceSetAttribute( cublas_preference_dRelu_, CUBLASLT_MATMUL_PREF_MAX_WORKSPACE_BYTES, &cublaslt_workspace_size_, sizeof(cublaslt_workspace_size_))); uint32_t pointer_mode = CUBLASLT_POINTER_MODE_MASK_HOST; CK_CUBLAS_THROW_(cublasLtMatmulPreferenceSetAttribute(cublas_preference_, CUBLASLT_MATMUL_PREF_POINTER_MODE_MASK, &pointer_mode, sizeof(pointer_mode))); // By default set algo to best estimated heurstic cublasLtMatmulHeuristicResult_t heuristic_result; int returned_res = 0; CK_CUBLAS_THROW_(cublasLtMatmulAlgoGetHeuristic( get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, cublas_kernel_desc_, cublas_dRelu_top_desc_, cublas_dRelu_bottom_desc_, cublas_dRelu_bottom_desc_, cublas_preference_dRelu_, 1, &heuristic_result, &returned_res)); memcpy(&balgo_dRelu_, &heuristic_result.algo, sizeof(balgo_dRelu_)); if (returned_res == 0) { CK_CUBLAS_THROW_(CUBLAS_STATUS_NOT_SUPPORTED); } } void FusedReluBiasFullyConnectedLayer::fprop(bool is_train) { CudaDeviceContext context(get_device_id()); PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.start", get_gpu().get_stream()); const __half* kernel = weights_half_[0].get_ptr(); const __half* bias = weights_half_[1].get_ptr(); const __half* bottom = get_bottom_tensor_fprop(is_train).get_ptr(); __half* top_fprop = train_out_tensor_.get_ptr(); __half* mask_out = mask_out_tensor_.get_ptr(); const auto& bottom_tensor_dim = get_bottom_tensor_fprop(is_train).get_dimensions(); const auto& top_tensor_dim = train_out_tensor_.get_dimensions(); size_t m = bottom_tensor_dim[0]; size_t n = top_tensor_dim[1]; size_t k = bottom_tensor_dim[1]; const float alpha = 1.0f; const float beta = 0.0f; PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.cublasLtMatmul.start", get_gpu().get_stream()); CK_CUBLAS_THROW_(cublasLtMatmul( get_gpu().get_cublaslt_handle(), cublas_op_desc_, &alpha, kernel, cublas_kernel_desc_, bottom, cublas_bottom_desc_, &beta, top_fprop, cublas_top_desc_, top_fprop, cublas_top_desc_, &falgo_k_, cublaslt_workspace_, cublaslt_workspace_size_, get_gpu().get_stream())); PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.cublasLtMatmul.stop", get_gpu().get_stream()); if ((pos_ == FcPosition_t::Tail || pos_ == FcPosition_t::Isolated) && act_ != Activation_t::None) { size_t len = train_out_tensor_.get_num_elements(); CK_CUDA_THROW_(cudaMemcpyAsync(mask_out, top_fprop, len * sizeof(__half), cudaMemcpyDeviceToDevice, get_gpu().get_stream())); } PROFILE_RECORD("fused_relu_bias_fully_connected.fprop.stop", get_gpu().get_stream()); #ifndef 
NDEBUG cudaDeviceSynchronize(); CK_CUDA_THROW_(cudaGetLastError()); #endif } void FusedReluBiasFullyConnectedLayer::bprop() { CudaDeviceContext context(get_device_id()); PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.start", get_gpu().get_stream()); const __half* kernel = weights_half_[0].get_ptr(); __half* mask_in = mask_in_tensor_.get_ptr(); const __half* train_out = train_out_tensor_.get_ptr(); __half* mask_out = mask_out_tensor_.get_ptr(); __half* kernel_grad = weights_grad_[0].get_ptr(); __half* bias_grad = weights_grad_[1].get_ptr(); const __half* bottom = get_bottom_tensor_fprop(true).get_ptr(); __half* bottom_bprop = get_bottom_tensor_bprop(true).get_ptr(); float* bias_grad_float = bias_grad_tensor_.get_ptr(); __half* dRelu_top = dRelu_out_tensor_.get_ptr(); const __half* identity = identity_tensor_.get_ptr(); const auto& bottom_tensor_dim = get_bottom_tensor_bprop(true).get_dimensions(); const auto& top_tensor_dim = train_out_tensor_.get_dimensions(); int m = bottom_tensor_dim[0]; int n = top_tensor_dim[1]; int k = bottom_tensor_dim[1]; const float alpha = 1.0f; const float beta_k = 1.0f; const float beta_x = 0.0f; const float beta_b = 0.0f; PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.reverse_add_bias_and_re_kernel.start", get_gpu().get_stream()); if (pos_ == FcPosition_t::Tail || pos_ == FcPosition_t::Isolated) { if (act_ == Activation_t::None) { CK_CUBLAS_THROW_(cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N, n, 1, m, &alpha, train_out, CUDA_R_16F, n, identity, CUDA_R_16F, m, &beta_b, bias_grad, CUDA_R_16F, n, CUDA_R_32F, balgo_b_)); } else { initialize_array<<<(n - 1) / 1024 + 1, 1024, 0, get_gpu().get_stream()>>>(bias_grad_float, n, 0.0f); dim3 blocks(n / 64, m / 32); reverse_add_bias_and_re_kernel<32><<<blocks, 512, 0, get_gpu().get_stream()>>>( bias_grad_float, dRelu_top, mask_out, train_out, n / 2); convert_array<<<(n - 1) / 1024 + 1, 1024, 0, get_gpu().get_stream()>>>(bias_grad, bias_grad_float, n); } } PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.reverse_add_bias_and_re_kernel.stop", get_gpu().get_stream()); if (act_ == Activation_t::None) { dRelu_top = train_out_tensor_.get_ptr(); } PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_1.start", get_gpu().get_stream()); CK_CUBLAS_THROW_(cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_T, n, k, m, &alpha, dRelu_top, CUDA_R_16F, n, bottom, CUDA_R_16F, k, &beta_k, kernel_grad, CUDA_R_16F, n, CUDA_R_32F, balgo_k_)); PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_1.stop", get_gpu().get_stream()); if (skip_dgrad_) { PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.stop", get_gpu().get_stream()); return; } if (pos_ == FcPosition_t::Body || pos_ == FcPosition_t::Tail) { bottom_bprop = dRelu_in_tensor_.get_ptr(); } PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_2.start", get_gpu().get_stream()); CK_CUBLAS_THROW_(cublasLtMatmul( get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, &alpha, kernel, cublas_kernel_desc_, dRelu_top, cublas_dRelu_top_desc_, &beta_x, bottom_bprop, cublas_dRelu_bottom_desc_, bottom_bprop, cublas_dRelu_bottom_desc_, &balgo_dRelu_, cublaslt_workspace_dRelu_, cublaslt_workspace_size_, get_gpu().get_stream())); PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.cublasGemmEx_2.stop", get_gpu().get_stream()); PROFILE_RECORD("fused_relu_bias_fully_connected.bprop.stop", get_gpu().get_stream()); #ifndef NDEBUG cudaDeviceSynchronize(); CK_CUDA_THROW_(cudaGetLastError()); #endif } void 
FusedReluBiasFullyConnectedLayer::search_algorithm() { // Set to the CUDA device where this layer assigned to CudaDeviceContext context(get_device_id()); const size_t repeat_num = 100; const int max_algo_count = 16; // Device Tensors to be used __half* bottom = get_bottom_tensor_fprop(true).get_ptr(); __half* top = train_out_tensor_.get_ptr(); __half* kernel = weights_half_[0].get_ptr(); __half* bias = weights_half_[1].get_ptr(); __half* kernel_grad = weights_grad_[0].get_ptr(); __half* bias_grad = weights_grad_[1].get_ptr(); __half* identity = identity_tensor_.get_ptr(); // Tensor dim const auto& bottom_tensor_dim = get_bottom_tensor_fprop(true).get_dimensions(); const auto& top_tensor_dim = train_out_tensor_.get_dimensions(); int m = bottom_tensor_dim[0]; int n = top_tensor_dim[1]; int k = bottom_tensor_dim[1]; // Record time for each algorithm float shortestTime = std::numeric_limits<float>::max(); float time; cudaEvent_t start, stop; CK_CUDA_THROW_(cudaEventCreate(&start)); CK_CUDA_THROW_(cudaEventCreate(&stop)); cublasLtMatmulHeuristicResult_t heuristic_result[max_algo_count] = {0}; int algo_count = 0; CK_CUBLAS_THROW_(cublasLtMatmulAlgoGetHeuristic( get_gpu().get_cublaslt_handle(), cublas_op_desc_, cublas_kernel_desc_, cublas_bottom_desc_, cublas_top_desc_, cublas_top_desc_, cublas_preference_, max_algo_count, heuristic_result, &algo_count)); if (algo_count == 0) { CK_CUBLAS_THROW_(CUBLAS_STATUS_NOT_SUPPORTED); } // if(get_device_id()==0) printf("M: %d, N: %d, K: %d\n", m, n, k); for (int algoIdx = 0; algoIdx < algo_count; algoIdx++) { cublasStatus_t status = CUBLAS_STATUS_SUCCESS; const float alpha = 1.0f; const float beta = 0.0f; CK_CUDA_THROW_(cudaEventRecord(start, get_gpu().get_stream())); for (size_t i = 0; i < repeat_num && status == CUBLAS_STATUS_SUCCESS; ++i) { status = cublasLtMatmul(get_gpu().get_cublaslt_handle(), cublas_op_desc_, &alpha, kernel, cublas_kernel_desc_, bottom, cublas_bottom_desc_, &beta, top, cublas_top_desc_, top, cublas_top_desc_, &heuristic_result[algoIdx].algo, cublaslt_workspace_, cublaslt_workspace_size_, get_gpu().get_stream()); } CK_CUDA_THROW_(cudaEventRecord(stop, get_gpu().get_stream())); CK_CUDA_THROW_(cudaEventSynchronize(stop)); CK_CUDA_THROW_(cudaEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != CUBLAS_STATUS_SUCCESS) { // printf("The algorithms %d is not supported for fprop, skipped.\n", testAlgo); continue; } // if(get_device_id()==0) printf("Algo: %d, wavesCount: %f, time: %f\n", // (int)heuristic_result[algoIdx].algo, // heuristic_result[algoIdx].wavesCount, // time); // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; memcpy(&falgo_k_, &heuristic_result[algoIdx].algo, sizeof(falgo_k_)); // if(get_device_id()==0) printf("Picked algorithm: %d", heuristic_result[algoIdx].algo); } } // dRelu in backward pass // Reset shortestTime shortestTime = std::numeric_limits<float>::max(); cublasLtMatmulHeuristicResult_t heuristic_result_dRelu[max_algo_count] = {0}; int algo_count_dRelu = 0; CK_CUBLAS_THROW_(cublasLtMatmulAlgoGetHeuristic( get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, cublas_kernel_desc_, cublas_dRelu_top_desc_, cublas_dRelu_bottom_desc_, cublas_dRelu_bottom_desc_, cublas_preference_dRelu_, max_algo_count, heuristic_result_dRelu, &algo_count_dRelu)); if (algo_count_dRelu == 0) { CK_CUBLAS_THROW_(CUBLAS_STATUS_NOT_SUPPORTED); } for (int algoIdx = 0; 
algoIdx < algo_count_dRelu; algoIdx++) { cublasStatus_t status = CUBLAS_STATUS_SUCCESS; const float alpha = 1.0f; const float beta = 0.0f; CK_CUDA_THROW_(cudaEventRecord(start, get_gpu().get_stream())); for (size_t i = 0; i < repeat_num && status == CUBLAS_STATUS_SUCCESS; ++i) { status = cublasLtMatmul(get_gpu().get_cublaslt_handle(), cublas_op_desc_bprop_, &alpha, kernel, cublas_kernel_desc_, top, cublas_dRelu_top_desc_, &beta, bottom, cublas_dRelu_bottom_desc_, bottom, cublas_dRelu_bottom_desc_, &heuristic_result_dRelu[algoIdx].algo, cublaslt_workspace_dRelu_, cublaslt_workspace_size_, get_gpu().get_stream()); } CK_CUDA_THROW_(cudaEventRecord(stop, get_gpu().get_stream())); CK_CUDA_THROW_(cudaEventSynchronize(stop)); CK_CUDA_THROW_(cudaEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != CUBLAS_STATUS_SUCCESS) { // printf("The algorithms %d is not supported for fprop, skipped.\n", testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; memcpy(&balgo_dRelu_, &heuristic_result_dRelu[algoIdx].algo, sizeof(balgo_dRelu_)); } } // Reset shortestTime shortestTime = std::numeric_limits<float>::max(); // Start, end for search const cublasGemmAlgo_t startAlgo = CUBLAS_GEMM_DEFAULT_TENSOR_OP; const cublasGemmAlgo_t endAlgo = CUBLAS_GEMM_ALGO15_TENSOR_OP; // Search all the algorithm for balgo_k_ for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) { cublasStatus_t status = CUBLAS_STATUS_SUCCESS; const float alpha = 1.0f; const float beta = 1.0f; // Record start event CK_CUDA_THROW_(cudaEventRecord(start, get_gpu().get_stream())); for (size_t i = 0; i < repeat_num && status == CUBLAS_STATUS_SUCCESS; ++i) { status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_T, n, k, m, &alpha, top, CUDA_R_16F, n, bottom, CUDA_R_16F, k, &beta, kernel_grad, CUDA_R_16F, n, CUDA_R_32F, static_cast<cublasGemmAlgo_t>(testAlgo)); } CK_CUDA_THROW_(cudaEventRecord(stop, get_gpu().get_stream())); CK_CUDA_THROW_(cudaEventSynchronize(stop)); CK_CUDA_THROW_(cudaEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != CUBLAS_STATUS_SUCCESS) { // printf("The algorithms %d is not supported for bprop_W, skipped.\n", testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; balgo_k_ = static_cast<cublasGemmAlgo_t>(testAlgo); } } // Reset shortestTime shortestTime = std::numeric_limits<float>::max(); // Search all the algorithm for balgo_b_ for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) { cublasStatus_t status = CUBLAS_STATUS_SUCCESS; const float alpha = 1.0f; const float beta = 0.0f; // Record start event CK_CUDA_THROW_(cudaEventRecord(start, get_gpu().get_stream())); for (size_t i = 0; i < repeat_num && status == CUBLAS_STATUS_SUCCESS; ++i) { status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_N, CUBLAS_OP_N, n, 1, m, &alpha, top, CUDA_R_16F, n, identity, CUDA_R_16F, m, &beta, bias_grad, CUDA_R_16F, n, CUDA_R_32F, static_cast<cublasGemmAlgo_t>(testAlgo)); } CK_CUDA_THROW_(cudaEventRecord(stop, get_gpu().get_stream())); CK_CUDA_THROW_(cudaEventSynchronize(stop)); CK_CUDA_THROW_(cudaEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // 
Skip if the algorithm is supported for fprop configuration if (status != CUBLAS_STATUS_SUCCESS) { // printf("The algorithms %d is not supported for bprop_W, skipped.\n", testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; balgo_b_ = static_cast<cublasGemmAlgo_t>(testAlgo); } } // Reset shortestTime shortestTime = std::numeric_limits<float>::max(); // Search all the algorithm for balgo_x_ for (int testAlgo = startAlgo; testAlgo <= endAlgo; testAlgo++) { cublasStatus_t status = CUBLAS_STATUS_SUCCESS; const __half alpha = 1.0f; const __half beta = 0.0f; // Record start event CK_CUDA_THROW_(cudaEventRecord(start, get_gpu().get_stream())); for (size_t i = 0; i < repeat_num && status == CUBLAS_STATUS_SUCCESS; ++i) { status = cublasGemmEx(get_gpu().get_cublas_handle(), CUBLAS_OP_T, CUBLAS_OP_N, k, m, n, &alpha, kernel, CUDA_R_16F, n, top, CUDA_R_16F, n, &beta, bottom, CUDA_R_16F, k, CUDA_R_32F, static_cast<cublasGemmAlgo_t>(testAlgo)); } CK_CUDA_THROW_(cudaEventRecord(stop, get_gpu().get_stream())); CK_CUDA_THROW_(cudaEventSynchronize(stop)); CK_CUDA_THROW_(cudaEventElapsedTime(&time, start, stop)); // Avg Time(ms) for this alorithm for fprop GEMM time = time / repeat_num; // Skip if the algorithm is supported for fprop configuration if (status != CUBLAS_STATUS_SUCCESS) { // printf("The algorithms %d is not supported for bprop_Xn, skipped.\n", testAlgo); continue; } // Record the optimal time and algorithm if (time < shortestTime) { shortestTime = time; balgo_x_ = static_cast<cublasGemmAlgo_t>(testAlgo); } } // Print selection information // printf("The algorithm selection for falgo_k_, balgo_k_, balgo_x_ are: %d, %d and %d.\n", // (int)falgo_k_ - CUBLAS_GEMM_DEFAULT_TENSOR_OP, // (int)balgo_k_ - CUBLAS_GEMM_DEFAULT_TENSOR_OP, // (int)balgo_x_ - CUBLAS_GEMM_DEFAULT_TENSOR_OP); // Output msg // MESSAGE_("The fully-connected layer has finished choosing the algorithm for cublas Gemm."); // Clean-up CK_CUDA_THROW_(cudaEventDestroy(start)); CK_CUDA_THROW_(cudaEventDestroy(stop)); } // namespace HugeCTR std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_uniform_initializer( const int index) { size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1]; size_t top_dim = train_out_tensor_.get_dimensions()[1]; float limit = 1.0f / ((0 == index ? bottom_dim : 0) + top_dim); return std::make_unique<UniformDataSimulator>(-1 * limit, limit); } std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_xavier_uniform_initializer( const int index) { size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1]; size_t top_dim = train_out_tensor_.get_dimensions()[1]; return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Uniform, 0 == index ? bottom_dim : 0, top_dim); } std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_xavier_norm_initializer( const int index) { size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1]; size_t top_dim = train_out_tensor_.get_dimensions()[1]; return std::make_unique<VarianceScalingSimulator>(1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Norm, 0 == index ? 
bottom_dim : 0, top_dim); } std::unique_ptr<DataSimulator> FusedReluBiasFullyConnectedLayer::get_default_initializer( const int index) { size_t bottom_dim = get_bottom_tensor_fprop(true).get_dimensions()[1]; size_t top_dim = train_out_tensor_.get_dimensions()[1]; std::unique_ptr<DataSimulator> simu(nullptr); if (0 == index) { simu.reset(new VarianceScalingSimulator(1.f, data_simu::Mode_t::Fan_avg, data_simu::Distribution_t::Norm, bottom_dim, top_dim)); } else if (1 == index) { float stddev = sqrt(1.f / top_dim); simu.reset(new GaussianDataSimulator(0, stddev, -2 * stddev, 2 * stddev)); } else { CK_THROW_(Error_t::OutOfBound, "index != {0, 1}."); } return simu; } } // namespace HugeCTR
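Note on the algorithm search in the pair above: search_algorithm() times every candidate GEMM algorithm by recording CUDA events around a fixed number of launches and keeping the fastest average. The following is a minimal, self-contained sketch of that timing pattern only; the dummy_work kernel, the array size, and the launch configuration are placeholders invented for illustration and are not code from HugeCTR.

#include <cstdio>
#include <cuda_runtime.h>

__global__ void dummy_work(float *x, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) x[i] = x[i] * 1.0001f + 0.5f;   // stand-in for one candidate GEMM
}

int main() {
    const int n = 1 << 20;
    const int repeat_num = 100;                // same repeat count as search_algorithm()
    float *d_x = nullptr;
    cudaMalloc(&d_x, n * sizeof(float));

    cudaEvent_t start, stop;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    // Record events on the launch stream, run the candidate repeat_num times,
    // then average -- the per-algorithm loops above do exactly this before
    // keeping the algorithm with the shortest time.
    cudaEventRecord(start, 0);
    for (int i = 0; i < repeat_num; ++i) {
        dummy_work<<<(n + 255) / 256, 256>>>(d_x, n);
    }
    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);

    float ms = 0.0f;
    cudaEventElapsedTime(&ms, start, stop);
    printf("avg time per launch: %f ms\n", ms / repeat_num);

    cudaEventDestroy(start);
    cudaEventDestroy(stop);
    cudaFree(d_x);
    return 0;
}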
0f57001896be8472db2518beb6d933fa5e00455e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= N) return; int out_index = i; int out_w = i % (w*stride); i = i / (w*stride); int out_h = i % (h*stride); i = i / (h*stride); int out_c = i%c; i = i / c; int b = i%batch; int in_w = out_w / stride; int in_h = out_h / stride; int in_c = out_c; int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w; if (forward) out[out_index] += scale * x[in_index]; else atomicAdd(x + in_index, scale * out[out_index]); }
0f57001896be8472db2518beb6d933fa5e00455e.cu
#include "includes.h" __global__ void upsample_kernel(size_t N, float *x, int w, int h, int c, int batch, int stride, int forward, float scale, float *out) { size_t i = (blockIdx.x + blockIdx.y*gridDim.x) * blockDim.x + threadIdx.x; if (i >= N) return; int out_index = i; int out_w = i % (w*stride); i = i / (w*stride); int out_h = i % (h*stride); i = i / (h*stride); int out_c = i%c; i = i / c; int b = i%batch; int in_w = out_w / stride; int in_h = out_h / stride; int in_c = out_c; int in_index = b*w*h*c + in_c*w*h + in_h*w + in_w; if (forward) out[out_index] += scale * x[in_index]; else atomicAdd(x + in_index, scale * out[out_index]); }
1a466cedfad55b8f66608d7f9bf8133336069ac1.hip
// !!! This is a file automatically generated by hipify!!!
#ifndef _SLICE_KERNEL_
#define _SLICE_KERNEL_

#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>
#include <stdio.h>
#include "config.h"

template <typename T>
__global__ void getRowsKernel(T * a, T * c, unsigned int first, unsigned int last, int M, int N){
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    if(row >= first && row <= last && col < N){
        // row `first` of the input becomes row 0 of the output slice
        c[(row - first) * N + col] = a[row * N + col];
    }
}

template <typename T>
__global__ void getColumnsKernel(T * a, T * c, unsigned int first, unsigned int last, int M, int N){
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    if(col >= first && col <= last && row < M){
        c[row * (last - first + 1) + (col - first)] = a[row * N + col];
    }
}

/*
    Wrapper functions for the slice kernels
    M, N - dimensions of the input matrix
*/
template <typename T>
void getRows(T * a, T * c, unsigned int first, unsigned int last, unsigned int M, unsigned int N, unsigned int threadsPerBlock){
    int nRows = last - first + 1;
    dim3 grid((int) ceil(N/(float)THREADS_PER_BLOCK), (int) ceil(M/(float)THREADS_PER_BLOCK), 1);
    dim3 block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);

    // launch kernel
    hipLaunchKernelGGL(( getRowsKernel<T>), dim3(grid), dim3(block), 0, 0, a, c, first, last, M, N);

    //check if launch was successful
    // hipError_t cudaerr = hipDeviceSynchronize();
    // if (cudaerr != hipSuccess)
    //     printf("slice kernel launch failed with error \"%s\".\n",
    //            hipGetErrorString(cudaerr));
}

template <typename T>
void getColumns(T * a, T * c, unsigned int first, unsigned int last, unsigned int M, unsigned int N, unsigned int threadsPerBlock){
    int nColumns = last - first + 1;
    dim3 grid((int) ceil(N/(float)THREADS_PER_BLOCK), (int) ceil(M/(float)THREADS_PER_BLOCK), 1);
    dim3 block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);

    // launch kernel
    hipLaunchKernelGGL(( getColumnsKernel<T>), dim3(grid), dim3(block), 0, 0, a, c, first, last, M, N);

    //check if launch was successful
    // hipError_t cudaerr = hipDeviceSynchronize();
    // if (cudaerr != hipSuccess)
    //     printf("slice kernel launch failed with error \"%s\".\n",
    //            hipGetErrorString(cudaerr));
    //
    // printf("inside getcolumnlauncher:%d * %d, %d * %d, %d\n", first, last, M, N, M * N);
}

// Initialize templates for float, double and int
template void getRows<int>(int * a, int * c, unsigned int first, unsigned int last, unsigned int M, unsigned int N, unsigned int threadsPerBlock);
template void getRows<float>(float * a, float * c, unsigned int first, unsigned int last, unsigned int M, unsigned int N, unsigned int threadsPerBlock);
template void getRows<double>(double * a, double * c, unsigned int first, unsigned int last, unsigned int M, unsigned int N, unsigned int threadsPerBlock);

template void getColumns<int>(int * a, int * c, unsigned int first, unsigned int last, unsigned int M, unsigned int N, unsigned int threadsPerBlock);
template void getColumns<float>(float * a, float * c, unsigned int first, unsigned int last, unsigned int M, unsigned int N, unsigned int threadsPerBlock);
template void getColumns<double>(double * a, double * c, unsigned int first, unsigned int last, unsigned int M, unsigned int N, unsigned int threadsPerBlock);

#endif
1a466cedfad55b8f66608d7f9bf8133336069ac1.cu
#ifndef _SLICE_KERNEL_
#define _SLICE_KERNEL_

#include <cuda_runtime.h>
#include <cuda.h>
#include <stdio.h>
#include "config.h"

template <typename T>
__global__ void getRowsKernel(T * a, T * c, unsigned int first, unsigned int last, int M, int N){
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    if(row >= first && row <= last && col < N){
        // row `first` of the input becomes row 0 of the output slice
        c[(row - first) * N + col] = a[row * N + col];
    }
}

template <typename T>
__global__ void getColumnsKernel(T * a, T * c, unsigned int first, unsigned int last, int M, int N){
    int row = threadIdx.y + blockDim.y * blockIdx.y;
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    if(col >= first && col <= last && row < M){
        c[row * (last - first + 1) + (col - first)] = a[row * N + col];
    }
}

/*
    Wrapper functions for the slice kernels
    M, N - dimensions of the input matrix
*/
template <typename T>
void getRows(T * a, T * c, unsigned int first, unsigned int last, unsigned int M, unsigned int N, unsigned int threadsPerBlock){
    int nRows = last - first + 1;
    dim3 grid((int) ceil(N/(float)THREADS_PER_BLOCK), (int) ceil(M/(float)THREADS_PER_BLOCK), 1);
    dim3 block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);

    // launch kernel
    getRowsKernel<T><<<grid, block>>>(a, c, first, last, M, N);

    //check if launch was successful
    // cudaError_t cudaerr = cudaDeviceSynchronize();
    // if (cudaerr != CUDA_SUCCESS)
    //     printf("slice kernel launch failed with error \"%s\".\n",
    //            cudaGetErrorString(cudaerr));
}

template <typename T>
void getColumns(T * a, T * c, unsigned int first, unsigned int last, unsigned int M, unsigned int N, unsigned int threadsPerBlock){
    int nColumns = last - first + 1;
    dim3 grid((int) ceil(N/(float)THREADS_PER_BLOCK), (int) ceil(M/(float)THREADS_PER_BLOCK), 1);
    dim3 block(THREADS_PER_BLOCK, THREADS_PER_BLOCK, 1);

    // launch kernel
    getColumnsKernel<T><<<grid, block>>>(a, c, first, last, M, N);

    //check if launch was successful
    // cudaError_t cudaerr = cudaDeviceSynchronize();
    // if (cudaerr != CUDA_SUCCESS)
    //     printf("slice kernel launch failed with error \"%s\".\n",
    //            cudaGetErrorString(cudaerr));
    //
    // printf("inside getcolumnlauncher:%d * %d, %d * %d, %d\n", first, last, M, N, M * N);
}

// Initialize templates for float, double and int
template void getRows<int>(int * a, int * c, unsigned int first, unsigned int last, unsigned int M, unsigned int N, unsigned int threadsPerBlock);
template void getRows<float>(float * a, float * c, unsigned int first, unsigned int last, unsigned int M, unsigned int N, unsigned int threadsPerBlock);
template void getRows<double>(double * a, double * c, unsigned int first, unsigned int last, unsigned int M, unsigned int N, unsigned int threadsPerBlock);

template void getColumns<int>(int * a, int * c, unsigned int first, unsigned int last, unsigned int M, unsigned int N, unsigned int threadsPerBlock);
template void getColumns<float>(float * a, float * c, unsigned int first, unsigned int last, unsigned int M, unsigned int N, unsigned int threadsPerBlock);
template void getColumns<double>(double * a, double * c, unsigned int first, unsigned int last, unsigned int M, unsigned int N, unsigned int threadsPerBlock);

#endif
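For completeness, a hypothetical host program that uses the getRows wrapper above (with the corrected row indexing). The matrix sizes, the value check, and the unused threadsPerBlock argument of 16 are illustrative assumptions, and THREADS_PER_BLOCK is expected to be defined in config.h when the wrapper translation unit is compiled and linked.

#include <cstdio>
#include <cuda_runtime.h>

// Declaration matching the wrapper defined (and explicitly instantiated) above.
template <typename T>
void getRows(T *a, T *c, unsigned int first, unsigned int last,
             unsigned int M, unsigned int N, unsigned int threadsPerBlock);

int main() {
    const unsigned int M = 8, N = 4, first = 2, last = 4;
    const unsigned int nRows = last - first + 1;

    float h_a[M * N];
    for (unsigned int i = 0; i < M * N; ++i) h_a[i] = (float)i;

    float *d_a, *d_c;
    cudaMalloc(&d_a, M * N * sizeof(float));
    cudaMalloc(&d_c, nRows * N * sizeof(float));
    cudaMemcpy(d_a, h_a, M * N * sizeof(float), cudaMemcpyHostToDevice);

    // Extract rows [first, last] of the M x N matrix into an nRows x N matrix.
    getRows<float>(d_a, d_c, first, last, M, N, 16);

    float h_c[nRows * N];
    cudaMemcpy(h_c, d_c, nRows * N * sizeof(float), cudaMemcpyDeviceToHost);
    printf("first sliced element: %f\n", h_c[0]);  // element (2,0) of the input, i.e. 8.0

    cudaFree(d_a);
    cudaFree(d_c);
    return 0;
}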
f2e45b2b5b6daa023c0cd83dafe8151747f5c999.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include "mytime.h"

__global__ void bankcheck() {
    __shared__ unsigned s[1024];
    s[1 * threadIdx.x] = threadIdx.x;
}
__global__ void bankcheck2() {
    __shared__ unsigned s[1024];
    s[32 * threadIdx.x] = threadIdx.x;
}
int main() {
    int ii;
    double start, end;

    hipLaunchKernelGGL(( bankcheck), dim3(1), dim3(32), 0, 0, );  // dummy for warmup.
    hipDeviceSynchronize();

    start = rtclock();
    for (ii = 0; ii < 1000; ++ii) {
        hipLaunchKernelGGL(( bankcheck), dim3(1), dim3(32), 0, 0, );
        hipDeviceSynchronize();
    }
    end = rtclock();
    printtime("bank consecutive: ", start, end);

    start = rtclock();
    for (ii = 0; ii < 1000; ++ii) {
        hipLaunchKernelGGL(( bankcheck2), dim3(1), dim3(32), 0, 0, );
        hipDeviceSynchronize();
    }
    end = rtclock();
    printtime("bank strided: ", start, end);

    return 0;
}
f2e45b2b5b6daa023c0cd83dafe8151747f5c999.cu
#include <stdio.h> #include <cuda.h> #include "mytime.h" __global__ void bankcheck() { __shared__ unsigned s[1024]; s[1 * threadIdx.x] = threadIdx.x; } __global__ void bankcheck2() { __shared__ unsigned s[1024]; s[32 * threadIdx.x] = threadIdx.x; } int main() { int ii; double start, end; bankcheck<<<1, 32>>>(); // dummy for warmup. cudaDeviceSynchronize(); start = rtclock(); for (ii = 0; ii < 1000; ++ii) { bankcheck<<<1, 32>>>(); cudaDeviceSynchronize(); } end = rtclock(); printtime("bank consecutive: ", start, end); start = rtclock(); for (ii = 0; ii < 1000; ++ii) { bankcheck2<<<1, 32>>>(); cudaDeviceSynchronize(); } end = rtclock(); printtime("bank strided: ", start, end); return 0; }
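The benchmark pair above isolates shared-memory bank conflicts: bankcheck writes with stride 1, so the 32 threads of a warp hit 32 different banks, while bankcheck2 writes with stride 32, so all of them hit bank 0 and the accesses serialize. A common remedy is to pad the stride so it is coprime with the number of banks; the bankcheck3 variant below is a hypothetical addition for illustration, not part of the original benchmark.

__global__ void bankcheck3() {
    // Stride 33 instead of 32: 33 * tid mod 32 == tid, so each of the 32
    // threads in a warp lands in a distinct bank and no serialization occurs.
    __shared__ unsigned s[32 * 33];
    s[33 * threadIdx.x] = threadIdx.x;
}

Timed with the same rtclock() loop, this padded variant would be expected to perform close to the stride-1 case rather than the stride-32 case.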
a740ce1e927c2716914b9b404931bc81130af8e6.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2019 Nobuyuki Umetani * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <cstdio> #include <cstdlib> #include <cassert> #include <cstdint> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/sort.h> #include "hip/hip_runtime.h" #include "cu_bvh.h" namespace dfm2 = delfem2; __device__ void device_AtomicMaxFloat(float * const address, const float value) { if ( *address >= value ) { return; } int * const address_as_i = (int *)address; int old = * address_as_i, assumed; do { assumed = old; if (__int_as_float(assumed) >= value) { break; } old = atomicCAS(address_as_i, assumed, __float_as_int(value)); } while (assumed != old); } __device__ void device_AtomicMinFloat(float * const address, const float value) { if ( *address <= value ) { return; } int * const address_as_i = (int *)address; int old = * address_as_i, assumed; do { assumed = old; if (__int_as_float(assumed) <= value) { break; } old = atomicCAS(address_as_i, assumed, __float_as_int(value)); } while (assumed != old); } template <typename REAL> __device__ float device_Distance3( const REAL p0[3], const REAL p1[3]) { const REAL v = (p1[0]-p0[0])*(p1[0]-p0[0]) + (p1[1]-p0[1])*(p1[1]-p0[1]) + (p1[2]-p0[2])*(p1[2]-p0[2]); return sqrt(v); } __device__ unsigned int device_ExpandBits(unsigned int v) { v = (v * 0x00010001u) & 0xFF0000FFu; v = (v * 0x00000101u) & 0x0F00F00Fu; v = (v * 0x00000011u) & 0xC30C30C3u; v = (v * 0x00000005u) & 0x49249249u; return v; } __device__ unsigned int device_MortonCode(float x, float y, float z) { auto ix = (unsigned int)fmin(fmax(x * 1024.0f, 0.0f), 1023.0f); auto iy = (unsigned int)fmin(fmax(y * 1024.0f, 0.0f), 1023.0f); auto iz = (unsigned int)fmin(fmax(z * 1024.0f, 0.0f), 1023.0f); // std::cout << std::bitset<10>(ix) << " " << std::bitset<10>(iy) << " " << std::bitset<10>(iz) << std::endl; ix = device_ExpandBits(ix); iy = device_ExpandBits(iy); iz = device_ExpandBits(iz); // std::cout << std::bitset<30>(ix) << " " << std::bitset<30>(iy) << " " << std::bitset<30>(iz) << std::endl; return ix * 4 + iy * 2 + iz; } __device__ int device_Delta(int i, int j, const unsigned int* sortedMC, int nMC) { if ( j<0 || j >= nMC ){ return -1; } return __clz(sortedMC[i] ^ sortedMC[j]); } __device__ int2 device_MortonCode_DeterminRange( const unsigned int* sortedMC, int nMC, int imc) { if( imc == 0 ){ return make_int2(0,nMC-1); } // ---------------------- const std::uint32_t mc0 = sortedMC[imc-1]; const std::uint32_t mc1 = sortedMC[imc+0]; const std::uint32_t mc2 = sortedMC[imc+1]; if( mc0 == mc1 && mc1 == mc2 ){ // for hash value collision int jmc=imc+1; for(;jmc<nMC;++jmc){ if( sortedMC[jmc] != mc1 ) break; } return make_int2(imc,jmc-1); } int d = device_Delta(imc, imc + 1, sortedMC, nMC) - device_Delta(imc, imc - 1, sortedMC, nMC); d = d > 0 ? 
1 : -1; //compute the upper bound for the length of the range const int delta_min = device_Delta(imc, imc - d, sortedMC, nMC); int lmax = 2; while (device_Delta(imc, imc + lmax*d, sortedMC, nMC)>delta_min) { lmax = lmax * 2; } //find the other end using binary search int l = 0; for (int t = lmax / 2; t >= 1; t /= 2) { if (device_Delta(imc, imc + (l + t)*d, sortedMC, nMC)>delta_min) { l = l + t; } } int j = imc + l*d; int2 range = make_int2(-1,-1); if (imc <= j) { range.x = imc; range.y = j; } else { range.x = j; range.y = imc; } return range; } __device__ int device_MortonCode_FindSplit( const unsigned int* sortedMC, unsigned int iMC_start, unsigned int iMC_last) { //return -1 if there is only //one primitive under this node. if (iMC_start == iMC_last) { return -1; } // ------------------------------ const int common_prefix = __clz(sortedMC[iMC_start] ^ sortedMC[iMC_last]); //handle duplicated morton code if (common_prefix == 32 ){ return iMC_start; } // sizeof(std::uint32_t)*8 // Use binary search to find where the next bit differs. // Specifically, we are looking for the highest object that // shares more than commonPrefix bits with the first one. const std::uint32_t mcStart = sortedMC[iMC_start]; int iMC_split = iMC_start; // initial guess int step = iMC_last - iMC_start; do { step = (step + 1) >> 1; // exponential decrease const int newSplit = iMC_split + step; // proposed new position if (newSplit < iMC_last){ const unsigned int splitCode = sortedMC[newSplit]; int splitPrefix = __clz(mcStart ^ splitCode); if (splitPrefix > common_prefix){ iMC_split = newSplit; // accept proposal } } } while (step > 1); return iMC_split; } template <typename REAL> class CudaBV_Sphere { public: __device__ void Set_Inactive() { r = -1; } __device__ void AddPoint(const REAL p[3], REAL R){ if( R < 0 ){ return; } if( r < 0 ){ c[0]=p[0]; c[1]=p[1]; c[2]=p[2]; r=R; return; } const REAL L = device_Distance3(p,c); if( r>L+R ){ return; } // including if( R>L+r){ // included c[0]=p[0]; c[1]=p[1]; c[2]=p[2]; r=R; return; } if( fabs(L) <= 1.0e-5*fabs(r+R) ){ // almost co-centric r = L+R; return; } const REAL r0 = 0.5*(L+r-R)/L; const REAL r1 = 0.5*(L+R-r)/L; assert( r0 >= 0 && r1 >= 0 ); c[0] = r0*c[0] + r1*p[0]; c[1] = r0*c[1] + r1*p[1]; c[2] = r0*c[2] + r1*p[2]; r = 0.5*(L+r+R); return; } __device__ void Add(const CudaBV_Sphere<REAL>& bb) { this->AddPoint(bb.c,bb.r); } __device__ void Range_DistToPoint(REAL& min0, REAL& max0, const REAL p[3]) const { if( r < 0 ){ return; } const REAL L = device_Distance3(p,c); if( L < r ){ min0 = 0; max0 = r+L; return; } min0 = L-r; max0 = L+r; } public: REAL r, c[3]; }; template <typename REAL> class CudaBV_AABB3 { public: __device__ void Set_Inactive() { bbmin[0] = +1; bbmax[0] = -1; } __device__ bool IsActive() const { if( bbmin[0] > bbmax[0] ){ return false; } return true; } __device__ void AddPoint(const REAL p[3], REAL eps){ if( eps < 0 ){ return; } if( !this->IsActive() ){ // something inside bbmin[0] = p[0]-eps; bbmax[0] = p[0]+eps; bbmin[1] = p[1]-eps; bbmax[1] = p[1]+eps; bbmin[2] = p[2]-eps; bbmax[2] = p[2]+eps; return; } bbmin[0] = ( bbmin[0] < p[0]-eps ) ? bbmin[0] : p[0]-eps; bbmin[1] = ( bbmin[1] < p[1]-eps ) ? bbmin[1] : p[1]-eps; bbmin[2] = ( bbmin[2] < p[2]-eps ) ? bbmin[2] : p[2]-eps; bbmax[0] = ( bbmax[0] > p[0]+eps ) ? bbmax[0] : p[0]+eps; bbmax[1] = ( bbmax[1] > p[1]+eps ) ? bbmax[1] : p[1]+eps; bbmax[2] = ( bbmax[2] > p[2]+eps ) ? 
bbmax[2] : p[2]+eps; } __device__ void Add(const CudaBV_AABB3<REAL>& bb){ if( !bb.IsActive() ){ return; } if( !this->IsActive() ){ bbmax[0] = bb.bbmax[0]; bbmin[0] = bb.bbmin[0]; bbmax[1] = bb.bbmax[1]; bbmin[1] = bb.bbmin[1]; bbmax[2] = bb.bbmax[2]; bbmin[2] = bb.bbmin[2]; return; } bbmin[0] = ( bbmin[0] < bb.bbmin[0] ) ? bbmin[0] : bb.bbmin[0]; bbmin[1] = ( bbmin[1] < bb.bbmin[1] ) ? bbmin[1] : bb.bbmin[1]; bbmin[2] = ( bbmin[2] < bb.bbmin[2] ) ? bbmin[2] : bb.bbmin[2]; bbmax[0] = ( bbmax[0] > bb.bbmax[0] ) ? bbmax[0] : bb.bbmax[0]; bbmax[1] = ( bbmax[1] > bb.bbmax[1] ) ? bbmax[1] : bb.bbmax[1]; bbmax[2] = ( bbmax[2] > bb.bbmax[2] ) ? bbmax[2] : bb.bbmax[2]; return; } public: REAL bbmin[3], bbmax[3]; }; // --------------------------------------------------------------------------------------------------------------------- // --------------------------------------------------------------------------------------------------------------------- // --------------------------------------------------------------------------------------------------------------------- __global__ void kernel_MinMax_TPB256( float *d_minmax, const float *d_XYZ, unsigned int np) { const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; const unsigned int s_idx = threadIdx.x; assert( blockDim.y == 3 && blockIdx.y == 0 ); unsigned int idy = threadIdx.y; if( idx >= np ){ return; } // --------------- const unsigned int BLOCK = 256; assert(blockDim.x == BLOCK); __shared__ float s_XYZ[BLOCK][3]; s_XYZ[s_idx][idy] = d_XYZ[idx*3+idy]; __syncthreads(); if( s_idx == 0 ) { float vmin = s_XYZ[0][idy]; float vmax = s_XYZ[0][idy]; int ns = BLOCK; if( blockDim.x * (blockIdx.x+1) > np ) { ns = np - blockDim.x * blockIdx.x; } for(int is=0;is<ns;++is){ if( s_XYZ[is][idy] < vmin ){ vmin = s_XYZ[is][idy]; } if( s_XYZ[is][idy] > vmax ){ vmax = s_XYZ[is][idy]; } } device_AtomicMinFloat(d_minmax+idy+0,vmin); device_AtomicMaxFloat(d_minmax+idy+3,vmax); } } void dfm2::cuda::cuda_Min3Max3_Points3F( float *h_min3, float *h_max3, const float *h_XYZ, unsigned int np) { h_min3[0] = h_max3[0] = h_XYZ[0]; h_min3[1] = h_max3[1] = h_XYZ[1]; h_min3[2] = h_max3[2] = h_XYZ[2]; // -------------------------------------- const thrust::device_vector<float> d_XYZ(h_XYZ,h_XYZ+np*3); thrust::device_vector<float> d_minmax(6); { const unsigned int BLOCK = 256; dim3 grid((np - 1) / BLOCK + 1); dim3 block(BLOCK, 3); hipLaunchKernelGGL(( kernel_MinMax_TPB256) , dim3(grid), dim3(block) , 0, 0, thrust::raw_pointer_cast(d_minmax.data()), thrust::raw_pointer_cast(d_XYZ.data()), np); } thrust::copy(d_minmax.begin()+0,d_minmax.begin()+3, h_min3); thrust::copy(d_minmax.begin()+3,d_minmax.begin()+6, h_max3); } // --------------------------------------------------------------------------------------------------------------------- __global__ void kernel_CentRad_MeshTri3D_TPB64( float *dXYZ_c, float *dMaxRad, const float *dXYZ, const unsigned int nXYZ, const unsigned int *dTri, const unsigned int nTri) { const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if( idx >= nTri ) return; // ---------------------------- const unsigned int itri = idx; const unsigned int i0 = dTri[itri*3+0]; const unsigned int i1 = dTri[itri*3+1]; const unsigned int i2 = dTri[itri*3+2]; assert( i0 < nXYZ && i1 < nXYZ && i2 < nXYZ ); const float p0[3] = {dXYZ[i0*3+0],dXYZ[i0*3+1],dXYZ[i0*3+2]}; const float p1[3] = {dXYZ[i1*3+0],dXYZ[i1*3+1],dXYZ[i1*3+2]}; const float p2[3] = {dXYZ[i2*3+0],dXYZ[i2*3+1],dXYZ[i2*3+2]}; const float pc[3] = { (p0[0]+p1[0]+p2[0])/3.f, 
(p0[1]+p1[1]+p2[1])/3.f, (p0[2]+p1[2]+p2[2])/3.f }; dXYZ_c[itri*3+0] = pc[0]; dXYZ_c[itri*3+1] = pc[1]; dXYZ_c[itri*3+2] = pc[2]; // --------------------- const float l0 = device_Distance3(pc, p0); const float l1 = device_Distance3(pc, p1); const float l2 = device_Distance3(pc, p2); float lm = l0; if( l1 > lm ){ lm = l1; } if( l2 > lm ){ lm = l2; } const unsigned int TPB = 64; assert( blockDim.x == TPB ); __shared__ float sRad[TPB]; const unsigned int s_idx = threadIdx.x; sRad[s_idx] = lm; __syncthreads(); if( s_idx == 0 ) { int ns = TPB; if( blockDim.x * (blockIdx.x+1) > nTri ) { ns = nTri - blockDim.x * blockIdx.x; } float blockRadMax = sRad[0]; for(int ins=1;ins<ns;++ins){ if( sRad[ins] > blockRadMax ){ blockRadMax = sRad[ins]; } } device_AtomicMaxFloat(dMaxRad, blockRadMax); } } void dfm2::cuda::cuda_CentsMaxRad_MeshTri3F( float* hXYZ_c, float* hMaxRad, const float *hXYZ, const unsigned int nXYZ, const unsigned int *hTri, const unsigned int nTri) { const thrust::device_vector<float> dXYZ(hXYZ, hXYZ+nXYZ*3); const thrust::device_vector<unsigned int> dTri(hTri, hTri+nTri*3); thrust::device_vector<float> dMaxRad(1); thrust::device_vector<float> dXYZ_c(nTri*3); { const unsigned int BLOCK = 64; dim3 grid( (nTri-1)/BLOCK + 1 ); dim3 block( BLOCK ); hipLaunchKernelGGL(( kernel_CentRad_MeshTri3D_TPB64) , dim3(grid), dim3(block) , 0, 0, thrust::raw_pointer_cast(dXYZ_c.data()), thrust::raw_pointer_cast(dMaxRad.data()), thrust::raw_pointer_cast(dXYZ.data()), nXYZ, thrust::raw_pointer_cast(dTri.data()), nTri); } thrust::copy(dXYZ_c.begin(), dXYZ_c.end(), hXYZ_c); thrust::copy(dMaxRad.begin(), dMaxRad.end(), hMaxRad); } // -------------------------------------------------------------------------------- __global__ void kernel_MortonCodeId_Points3F_TPB64( unsigned int *dMC, unsigned int *dId, const float *dXYZ, const unsigned int nXYZ, const float* min_xyz, const float* max_xyz) { const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if( idx >= nXYZ ) return; dId[idx] = idx; // ---------------------------- const float x0 = (dXYZ[idx*3+0]-min_xyz[0])/(max_xyz[0]-min_xyz[0]); const float y0 = (dXYZ[idx*3+1]-min_xyz[1])/(max_xyz[1]-min_xyz[1]); const float z0 = (dXYZ[idx*3+2]-min_xyz[2])/(max_xyz[2]-min_xyz[2]); unsigned int mc = device_MortonCode(x0,y0,z0); dMC[idx] = mc; } void dfm2::cuda::cuda_MortonCode_Points3FSorted( unsigned int *hSortedId, std::uint32_t *hSortedMc, const float *hXYZ, const unsigned int nXYZ, const float* hMinXYZ, const float* hMaxXYZ) { const thrust::device_vector<float> dXYZ(hXYZ, hXYZ+nXYZ*3); thrust::device_vector<unsigned int> dMC(nXYZ); thrust::device_vector<unsigned int> dId(nXYZ); thrust::device_vector<float> dMinXYZ(hMinXYZ,hMinXYZ+6); thrust::device_vector<float> dMaxXYZ(hMaxXYZ,hMaxXYZ+6); { const unsigned int BLOCK = 64; dim3 grid( (nXYZ-1)/BLOCK+1 ); dim3 block( BLOCK ); hipLaunchKernelGGL(( kernel_MortonCodeId_Points3F_TPB64) , dim3(grid), dim3(block) , 0, 0, thrust::raw_pointer_cast(dMC.data()), thrust::raw_pointer_cast(dId.data()), thrust::raw_pointer_cast(dXYZ.data()), nXYZ, thrust::raw_pointer_cast(dMinXYZ.data()), thrust::raw_pointer_cast(dMaxXYZ.data())); } thrust::sort_by_key(dMC.begin(),dMC.end(),dId.begin()); thrust::copy(dMC.begin(), dMC.end(), hSortedMc); thrust::copy(dId.begin(), dId.end(), hSortedId); } // ------------------------------------------------ __global__ void kernel_MortonCode_BVHTopology_TPB64( dfm2::CNodeBVH2* dNodeBVH, const unsigned int *dSortedMC, const unsigned int *dSortedId, const unsigned int nMC) { const unsigned int idx = 
blockDim.x * blockIdx.x + threadIdx.x; if (idx >= nMC-1) return; const unsigned int ini = idx; const unsigned int nni = nMC-1; // ------------------------------- const int2 range = device_MortonCode_DeterminRange(dSortedMC,nMC,ini); const int isplit = device_MortonCode_FindSplit(dSortedMC,range.x,range.y); // printf("%d --> %d %d %d\n",ini, range.x, range.y, isplit); // ------------------------------- if( range.x == isplit ){ const unsigned int inlA = nni+isplit; dNodeBVH[ini].ichild[0] = inlA; dNodeBVH[inlA].iparent = ini; dNodeBVH[inlA].ichild[0] = dSortedId[isplit]; dNodeBVH[inlA].ichild[1] = UINT_MAX; } else{ const unsigned int iniA = isplit; dNodeBVH[ini].ichild[0] = iniA; dNodeBVH[iniA].iparent = ini; } // ---- if( range.y == isplit+1 ){ const unsigned int inlB = nni+isplit+1; dNodeBVH[ini].ichild[1] = inlB; dNodeBVH[inlB].iparent = ini; dNodeBVH[inlB].ichild[0] = dSortedId[isplit+1]; dNodeBVH[inlB].ichild[1] = UINT_MAX; } else{ const unsigned int iniB = isplit+1; dNodeBVH[ini].ichild[1] = iniB; dNodeBVH[iniB].iparent = ini; } } void dfm2::cuda::cuda_MortonCode_BVHTopology( CNodeBVH2* hNodeBVH, const unsigned int* aSortedId, const std::uint32_t* aSortedMc, unsigned int N) { const thrust::device_vector<std::uint32_t> dMC(aSortedMc,aSortedMc+N); const thrust::device_vector<unsigned int> dId(aSortedId,aSortedId+N); thrust::device_vector<dfm2::CNodeBVH2> dNodeBVH(N*2-1); // ---------------------------------- { const unsigned int BLOCK = 64; dim3 grid( (N-1)/BLOCK+1 ); dim3 block( BLOCK ); hipLaunchKernelGGL(( kernel_MortonCode_BVHTopology_TPB64) , dim3(grid), dim3(block) , 0, 0, thrust::raw_pointer_cast(dNodeBVH.data()), thrust::raw_pointer_cast(dMC.data()), thrust::raw_pointer_cast(dId.data()), N); } thrust::copy(dNodeBVH.begin(), dNodeBVH.end(), hNodeBVH); hNodeBVH[0].iparent = UINT_MAX; } // ------------------------------------------------------------------------ template <typename BBOX> __global__ void kernel_BVHGeometry( BBOX* dBox, int* dNum, // const dfm2::CNodeBVH2* dNodeBVH, const float* dXYZ, const unsigned int* dTri, unsigned int nTri, float eps) { const unsigned int ino = blockDim.x * blockIdx.x + threadIdx.x; if (ino >= nTri) return; { // make aabb for triangle assert( dNodeBVH[nTri - 1 + ino].ichild[1] == UINT_MAX ); assert( dNodeBVH[nTri - 1 + ino].iparent < nTri-1 ); const int itri = dNodeBVH[nTri - 1 + ino].ichild[0]; assert(itri >= 0 && itri < nTri); const unsigned int i0 = dTri[itri * 3 + 0]; const unsigned int i1 = dTri[itri * 3 + 1]; const unsigned int i2 = dTri[itri * 3 + 2]; const float *p0 = dXYZ + i0 * 3; const float *p1 = dXYZ + i1 * 3; const float *p2 = dXYZ + i2 * 3; dBox[nTri-1+ino].Set_Inactive(); dBox[nTri-1+ino].AddPoint(p0,eps); dBox[nTri-1+ino].AddPoint(p1,eps); dBox[nTri-1+ino].AddPoint(p2,eps); } // ---------------------------------------------------- unsigned int ino0 = dNodeBVH[nTri-1+ino].iparent; while(true){ assert( ino0 < nTri-1 ); //assert( dNodeBVH[ino0].ichild[0] >= 0 ); //assert( dNodeBVH[ino0].ichild[1] >= 0 ); const unsigned int inoc0 = dNodeBVH[ino0].ichild[0]; const unsigned int inoc1 = dNodeBVH[ino0].ichild[1]; assert( dNodeBVH[inoc0].iparent == ino0 ); assert( dNodeBVH[inoc1].iparent == ino0 ); assert( inoc0 < nTri*2-1 ); assert( inoc1 < nTri*2-1 ); const int iflg_old = atomicCAS(dNum+ino0,0,1); if( iflg_old == 0 ){ // let the another branch of the binary tree do the work return; } __threadfence(); // sync global memory // --------------------------------------- dBox[ino0].Set_Inactive(); dBox[ino0].Add(dBox[inoc0]); 
dBox[ino0].Add(dBox[inoc1]); // ---------------------------------------- if( dNodeBVH[ino0].iparent == UINT_MAX ){ assert(ino0==0); return; } ino0 = dNodeBVH[ino0].iparent; } } void dfm2::cuda::cuda_BVHGeometry_AABB3f( dfm2::CBV3_AABB<float>* hAABB, const CNodeBVH2* hNodeBVH, const float* hXYZ, unsigned int nXYZ, const unsigned int* hTri, unsigned int nTri) { const thrust::device_vector<dfm2::CNodeBVH2> dNodeBVH(hNodeBVH, hNodeBVH+2*nTri-1); const thrust::device_vector<float> dXYZ(hXYZ, hXYZ+nXYZ*3); const thrust::device_vector<unsigned int> dTri(hTri, hTri+nTri*3); thrust::device_vector<CudaBV_AABB3<float>> dAABB( 2*nTri-1 ); thrust::device_vector<int> dNum(nTri-1, 0); // ----------------------------- { const unsigned int BLOCK = 512; dim3 grid((nTri - 1) / BLOCK + 1); dim3 block(BLOCK); hipLaunchKernelGGL(( kernel_BVHGeometry) , dim3(grid), dim3(block) , 0, 0, thrust::raw_pointer_cast(dAABB.data()), thrust::raw_pointer_cast(dNum.data()), // thrust::raw_pointer_cast(dNodeBVH.data()), thrust::raw_pointer_cast(dXYZ.data()), thrust::raw_pointer_cast(dTri.data()), nTri, 0.0); } // ------------------------------ hipMemcpy(hAABB, thrust::raw_pointer_cast(dAABB.data()), sizeof(CudaBV_AABB3<float>)*dAABB.size(), hipMemcpyDeviceToHost); } template <typename REAL> void dfm2::cuda::cuda_BVHGeometry_Sphere( dfm2::CBV3_Sphere<REAL>* hAABB, const CNodeBVH2* hNodeBVH, const REAL* hXYZ, unsigned int nXYZ, const unsigned int* hTri, unsigned int nTri) { const thrust::device_vector<dfm2::CNodeBVH2> dNodeBVH(hNodeBVH, hNodeBVH+2*nTri-1); const thrust::device_vector<REAL> dXYZ(hXYZ, hXYZ+nXYZ*3); const thrust::device_vector<unsigned int> dTri(hTri, hTri+nTri*3); thrust::device_vector<CudaBV_Sphere<REAL>> dAABB( 2*nTri-1 ); thrust::device_vector<int> dNum(nTri-1, 0); // ----------------------------- { const unsigned int BLOCK = 512; dim3 grid((nTri - 1) / BLOCK + 1); dim3 block(BLOCK); hipLaunchKernelGGL(( kernel_BVHGeometry) , dim3(grid), dim3(block) , 0, 0, thrust::raw_pointer_cast(dAABB.data()), thrust::raw_pointer_cast(dNum.data()), // thrust::raw_pointer_cast(dNodeBVH.data()), thrust::raw_pointer_cast(dXYZ.data()), thrust::raw_pointer_cast(dTri.data()), nTri, 0.0); } // ------------------------------ hipMemcpy(hAABB, thrust::raw_pointer_cast(dAABB.data()), sizeof(CudaBV_Sphere<REAL>)*dAABB.size(), hipMemcpyDeviceToHost); } template void dfm2::cuda::cuda_BVHGeometry_Sphere( dfm2::CBV3_Sphere<float>* hAABB, const CNodeBVH2* hNodeBVH, const float* hXYZ, unsigned int nXYZ, const unsigned int* hTri, unsigned int nTri); // ------------------------------------------------------------------------- __device__ void device_BVH_IndPoint_NearestPoint( unsigned int* ip, float* cur_dist, // const float p[3], unsigned int ibvh, const delfem2::CNodeBVH2* aNodeBVH, const CudaBV_Sphere<float>* dBVSphere) { float min0=+1.0, max0=-1.0; dBVSphere[ibvh].Range_DistToPoint(min0,max0,p); // if( max0 < min0 ){ return; } // ibvh is a inactive bvh the children should be inactive too if( *cur_dist > 0 && min0> *cur_dist ){ return; } // current range [min,max] is valid and nearer than [min0,min0]. 
  const unsigned int ichild0 = aNodeBVH[ibvh].ichild[0];
  const unsigned int ichild1 = aNodeBVH[ibvh].ichild[1];
  if( ichild1 == UINT_MAX ){ // leaf
    assert( min0 == max0 ); // because this is point
    if( *cur_dist < 0 || max0 < *cur_dist ){ // current range is inactive
      *cur_dist = max0;
      *ip = ichild0;
    }
    return;
  }
  // ------------------
  device_BVH_IndPoint_NearestPoint(
      ip,cur_dist,
      p,
      ichild0,aNodeBVH,dBVSphere);
  device_BVH_IndPoint_NearestPoint(
      ip,cur_dist,
      p,
      ichild1,aNodeBVH,dBVSphere);
}

template <typename REAL>
__global__
void kernel_BVHNearestPoint(
    unsigned int* dId,
    //
    const REAL* dXYZ1,
    unsigned int nXYZ1,
    const dfm2::CNodeBVH2* dNodeBVH,
    unsigned int nNodeBVH,
    const CudaBV_Sphere<REAL>* dBVSphere)
{
  const unsigned int ip1 = blockDim.x * blockIdx.x + threadIdx.x;
  if (ip1 >= nXYZ1 ) return;
  const float p1[3] = {dXYZ1[ip1*3+0], dXYZ1[ip1*3+1], dXYZ1[ip1*3+2]};
  float cur_dist = -1;
  device_BVH_IndPoint_NearestPoint(
      dId+ip1, &cur_dist,
      //
      p1,0,dNodeBVH,dBVSphere);
}

template <typename REAL>
void dfm2::cuda::cuda_BVH_NearestPoint(
    unsigned int* hInd,
    //
    const REAL* hXYZ1,
    unsigned int nXYZ1,
    const CNodeBVH2* hNodeBVH0,
    unsigned int nNodeBVH0,
    const dfm2::CBV3_Sphere<REAL>* hBVSphere0)
{
  const thrust::device_vector<REAL> dXYZ1(hXYZ1, hXYZ1+nXYZ1*3);
  const thrust::device_vector<dfm2::CNodeBVH2> dNodeBVH0(hNodeBVH0, hNodeBVH0+nNodeBVH0);
  const thrust::device_vector<CudaBV_Sphere<REAL>> dBVSphere0(nNodeBVH0);
  hipMemcpy(
      (void*)thrust::raw_pointer_cast(dBVSphere0.data()),
      hBVSphere0,
      sizeof(CudaBV_Sphere<REAL>)*nNodeBVH0,
      hipMemcpyHostToDevice);
  thrust::device_vector<unsigned int> dInd(nXYZ1, 0);
  // -----------------------------
  {
    hipDeviceSetLimit(hipLimitStackSize, 4096);
    const unsigned int BLOCK = 512;
    dim3 grid((nXYZ1 - 1) / BLOCK + 1);
    dim3 block(BLOCK);
    kernel_BVHNearestPoint << < grid, block >> > (
        thrust::raw_pointer_cast(dInd.data()),
        //
        thrust::raw_pointer_cast(dXYZ1.data()), nXYZ1,
        thrust::raw_pointer_cast(dNodeBVH0.data()), nNodeBVH0,
        thrust::raw_pointer_cast(dBVSphere0.data()));
  }
  thrust::copy(dInd.begin(), dInd.end(), hInd);
}

template void dfm2::cuda::cuda_BVH_NearestPoint(
    unsigned int* hInd,
    //
    const float* hXYZ1,
    unsigned int nXYZ1,
    const CNodeBVH2* hNodeBVH0,
    unsigned int nNodeBVH0,
    const dfm2::CBV3_Sphere<float>* hBVSphere0);
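The launch of kernel_BVHNearestPoint above is the one call that hipify left in triple-chevron form; hip-clang accepts that syntax, so it still compiles, but every other launch in this file was rewritten to the hipLaunchKernelGGL macro. A sketch of the same launch in that form, assuming the surrounding scope of cuda_BVH_NearestPoint, would be:

    // Sketch only: hipLaunchKernelGGL form of the kernel_BVHNearestPoint launch above,
    // following the pattern hipify used for kernel_BVHGeometry earlier in this file.
    // Grid, block, and arguments are unchanged from the triple-chevron call.
    hipLaunchKernelGGL(( kernel_BVHNearestPoint) , dim3(grid), dim3(block), 0, 0,
        thrust::raw_pointer_cast(dInd.data()),
        //
        thrust::raw_pointer_cast(dXYZ1.data()), nXYZ1,
        thrust::raw_pointer_cast(dNodeBVH0.data()), nNodeBVH0,
        thrust::raw_pointer_cast(dBVSphere0.data()));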
a740ce1e927c2716914b9b404931bc81130af8e6.cu
/* * Copyright (c) 2019 Nobuyuki Umetani * * This source code is licensed under the MIT license found in the * LICENSE file in the root directory of this source tree. */ #include <cstdio> #include <cstdlib> #include <cassert> #include <cstdint> #include <thrust/device_vector.h> #include <thrust/copy.h> #include <thrust/sort.h> #include "cuda_runtime.h" #include "cu_bvh.h" namespace dfm2 = delfem2; __device__ void device_AtomicMaxFloat(float * const address, const float value) { if ( *address >= value ) { return; } int * const address_as_i = (int *)address; int old = * address_as_i, assumed; do { assumed = old; if (__int_as_float(assumed) >= value) { break; } old = atomicCAS(address_as_i, assumed, __float_as_int(value)); } while (assumed != old); } __device__ void device_AtomicMinFloat(float * const address, const float value) { if ( *address <= value ) { return; } int * const address_as_i = (int *)address; int old = * address_as_i, assumed; do { assumed = old; if (__int_as_float(assumed) <= value) { break; } old = atomicCAS(address_as_i, assumed, __float_as_int(value)); } while (assumed != old); } template <typename REAL> __device__ float device_Distance3( const REAL p0[3], const REAL p1[3]) { const REAL v = (p1[0]-p0[0])*(p1[0]-p0[0]) + (p1[1]-p0[1])*(p1[1]-p0[1]) + (p1[2]-p0[2])*(p1[2]-p0[2]); return sqrt(v); } __device__ unsigned int device_ExpandBits(unsigned int v) { v = (v * 0x00010001u) & 0xFF0000FFu; v = (v * 0x00000101u) & 0x0F00F00Fu; v = (v * 0x00000011u) & 0xC30C30C3u; v = (v * 0x00000005u) & 0x49249249u; return v; } __device__ unsigned int device_MortonCode(float x, float y, float z) { auto ix = (unsigned int)fmin(fmax(x * 1024.0f, 0.0f), 1023.0f); auto iy = (unsigned int)fmin(fmax(y * 1024.0f, 0.0f), 1023.0f); auto iz = (unsigned int)fmin(fmax(z * 1024.0f, 0.0f), 1023.0f); // std::cout << std::bitset<10>(ix) << " " << std::bitset<10>(iy) << " " << std::bitset<10>(iz) << std::endl; ix = device_ExpandBits(ix); iy = device_ExpandBits(iy); iz = device_ExpandBits(iz); // std::cout << std::bitset<30>(ix) << " " << std::bitset<30>(iy) << " " << std::bitset<30>(iz) << std::endl; return ix * 4 + iy * 2 + iz; } __device__ int device_Delta(int i, int j, const unsigned int* sortedMC, int nMC) { if ( j<0 || j >= nMC ){ return -1; } return __clz(sortedMC[i] ^ sortedMC[j]); } __device__ int2 device_MortonCode_DeterminRange( const unsigned int* sortedMC, int nMC, int imc) { if( imc == 0 ){ return make_int2(0,nMC-1); } // ---------------------- const std::uint32_t mc0 = sortedMC[imc-1]; const std::uint32_t mc1 = sortedMC[imc+0]; const std::uint32_t mc2 = sortedMC[imc+1]; if( mc0 == mc1 && mc1 == mc2 ){ // for hash value collision int jmc=imc+1; for(;jmc<nMC;++jmc){ if( sortedMC[jmc] != mc1 ) break; } return make_int2(imc,jmc-1); } int d = device_Delta(imc, imc + 1, sortedMC, nMC) - device_Delta(imc, imc - 1, sortedMC, nMC); d = d > 0 ? 
1 : -1; //compute the upper bound for the length of the range const int delta_min = device_Delta(imc, imc - d, sortedMC, nMC); int lmax = 2; while (device_Delta(imc, imc + lmax*d, sortedMC, nMC)>delta_min) { lmax = lmax * 2; } //find the other end using binary search int l = 0; for (int t = lmax / 2; t >= 1; t /= 2) { if (device_Delta(imc, imc + (l + t)*d, sortedMC, nMC)>delta_min) { l = l + t; } } int j = imc + l*d; int2 range = make_int2(-1,-1); if (imc <= j) { range.x = imc; range.y = j; } else { range.x = j; range.y = imc; } return range; } __device__ int device_MortonCode_FindSplit( const unsigned int* sortedMC, unsigned int iMC_start, unsigned int iMC_last) { //return -1 if there is only //one primitive under this node. if (iMC_start == iMC_last) { return -1; } // ------------------------------ const int common_prefix = __clz(sortedMC[iMC_start] ^ sortedMC[iMC_last]); //handle duplicated morton code if (common_prefix == 32 ){ return iMC_start; } // sizeof(std::uint32_t)*8 // Use binary search to find where the next bit differs. // Specifically, we are looking for the highest object that // shares more than commonPrefix bits with the first one. const std::uint32_t mcStart = sortedMC[iMC_start]; int iMC_split = iMC_start; // initial guess int step = iMC_last - iMC_start; do { step = (step + 1) >> 1; // exponential decrease const int newSplit = iMC_split + step; // proposed new position if (newSplit < iMC_last){ const unsigned int splitCode = sortedMC[newSplit]; int splitPrefix = __clz(mcStart ^ splitCode); if (splitPrefix > common_prefix){ iMC_split = newSplit; // accept proposal } } } while (step > 1); return iMC_split; } template <typename REAL> class CudaBV_Sphere { public: __device__ void Set_Inactive() { r = -1; } __device__ void AddPoint(const REAL p[3], REAL R){ if( R < 0 ){ return; } if( r < 0 ){ c[0]=p[0]; c[1]=p[1]; c[2]=p[2]; r=R; return; } const REAL L = device_Distance3(p,c); if( r>L+R ){ return; } // including if( R>L+r){ // included c[0]=p[0]; c[1]=p[1]; c[2]=p[2]; r=R; return; } if( fabs(L) <= 1.0e-5*fabs(r+R) ){ // almost co-centric r = L+R; return; } const REAL r0 = 0.5*(L+r-R)/L; const REAL r1 = 0.5*(L+R-r)/L; assert( r0 >= 0 && r1 >= 0 ); c[0] = r0*c[0] + r1*p[0]; c[1] = r0*c[1] + r1*p[1]; c[2] = r0*c[2] + r1*p[2]; r = 0.5*(L+r+R); return; } __device__ void Add(const CudaBV_Sphere<REAL>& bb) { this->AddPoint(bb.c,bb.r); } __device__ void Range_DistToPoint(REAL& min0, REAL& max0, const REAL p[3]) const { if( r < 0 ){ return; } const REAL L = device_Distance3(p,c); if( L < r ){ min0 = 0; max0 = r+L; return; } min0 = L-r; max0 = L+r; } public: REAL r, c[3]; }; template <typename REAL> class CudaBV_AABB3 { public: __device__ void Set_Inactive() { bbmin[0] = +1; bbmax[0] = -1; } __device__ bool IsActive() const { if( bbmin[0] > bbmax[0] ){ return false; } return true; } __device__ void AddPoint(const REAL p[3], REAL eps){ if( eps < 0 ){ return; } if( !this->IsActive() ){ // something inside bbmin[0] = p[0]-eps; bbmax[0] = p[0]+eps; bbmin[1] = p[1]-eps; bbmax[1] = p[1]+eps; bbmin[2] = p[2]-eps; bbmax[2] = p[2]+eps; return; } bbmin[0] = ( bbmin[0] < p[0]-eps ) ? bbmin[0] : p[0]-eps; bbmin[1] = ( bbmin[1] < p[1]-eps ) ? bbmin[1] : p[1]-eps; bbmin[2] = ( bbmin[2] < p[2]-eps ) ? bbmin[2] : p[2]-eps; bbmax[0] = ( bbmax[0] > p[0]+eps ) ? bbmax[0] : p[0]+eps; bbmax[1] = ( bbmax[1] > p[1]+eps ) ? bbmax[1] : p[1]+eps; bbmax[2] = ( bbmax[2] > p[2]+eps ) ? 
bbmax[2] : p[2]+eps; } __device__ void Add(const CudaBV_AABB3<REAL>& bb){ if( !bb.IsActive() ){ return; } if( !this->IsActive() ){ bbmax[0] = bb.bbmax[0]; bbmin[0] = bb.bbmin[0]; bbmax[1] = bb.bbmax[1]; bbmin[1] = bb.bbmin[1]; bbmax[2] = bb.bbmax[2]; bbmin[2] = bb.bbmin[2]; return; } bbmin[0] = ( bbmin[0] < bb.bbmin[0] ) ? bbmin[0] : bb.bbmin[0]; bbmin[1] = ( bbmin[1] < bb.bbmin[1] ) ? bbmin[1] : bb.bbmin[1]; bbmin[2] = ( bbmin[2] < bb.bbmin[2] ) ? bbmin[2] : bb.bbmin[2]; bbmax[0] = ( bbmax[0] > bb.bbmax[0] ) ? bbmax[0] : bb.bbmax[0]; bbmax[1] = ( bbmax[1] > bb.bbmax[1] ) ? bbmax[1] : bb.bbmax[1]; bbmax[2] = ( bbmax[2] > bb.bbmax[2] ) ? bbmax[2] : bb.bbmax[2]; return; } public: REAL bbmin[3], bbmax[3]; }; // --------------------------------------------------------------------------------------------------------------------- // --------------------------------------------------------------------------------------------------------------------- // --------------------------------------------------------------------------------------------------------------------- __global__ void kernel_MinMax_TPB256( float *d_minmax, const float *d_XYZ, unsigned int np) { const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; const unsigned int s_idx = threadIdx.x; assert( blockDim.y == 3 && blockIdx.y == 0 ); unsigned int idy = threadIdx.y; if( idx >= np ){ return; } // --------------- const unsigned int BLOCK = 256; assert(blockDim.x == BLOCK); __shared__ float s_XYZ[BLOCK][3]; s_XYZ[s_idx][idy] = d_XYZ[idx*3+idy]; __syncthreads(); if( s_idx == 0 ) { float vmin = s_XYZ[0][idy]; float vmax = s_XYZ[0][idy]; int ns = BLOCK; if( blockDim.x * (blockIdx.x+1) > np ) { ns = np - blockDim.x * blockIdx.x; } for(int is=0;is<ns;++is){ if( s_XYZ[is][idy] < vmin ){ vmin = s_XYZ[is][idy]; } if( s_XYZ[is][idy] > vmax ){ vmax = s_XYZ[is][idy]; } } device_AtomicMinFloat(d_minmax+idy+0,vmin); device_AtomicMaxFloat(d_minmax+idy+3,vmax); } } void dfm2::cuda::cuda_Min3Max3_Points3F( float *h_min3, float *h_max3, const float *h_XYZ, unsigned int np) { h_min3[0] = h_max3[0] = h_XYZ[0]; h_min3[1] = h_max3[1] = h_XYZ[1]; h_min3[2] = h_max3[2] = h_XYZ[2]; // -------------------------------------- const thrust::device_vector<float> d_XYZ(h_XYZ,h_XYZ+np*3); thrust::device_vector<float> d_minmax(6); { const unsigned int BLOCK = 256; dim3 grid((np - 1) / BLOCK + 1); dim3 block(BLOCK, 3); kernel_MinMax_TPB256 <<< grid, block >>> ( thrust::raw_pointer_cast(d_minmax.data()), thrust::raw_pointer_cast(d_XYZ.data()), np); } thrust::copy(d_minmax.begin()+0,d_minmax.begin()+3, h_min3); thrust::copy(d_minmax.begin()+3,d_minmax.begin()+6, h_max3); } // --------------------------------------------------------------------------------------------------------------------- __global__ void kernel_CentRad_MeshTri3D_TPB64( float *dXYZ_c, float *dMaxRad, const float *dXYZ, const unsigned int nXYZ, const unsigned int *dTri, const unsigned int nTri) { const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if( idx >= nTri ) return; // ---------------------------- const unsigned int itri = idx; const unsigned int i0 = dTri[itri*3+0]; const unsigned int i1 = dTri[itri*3+1]; const unsigned int i2 = dTri[itri*3+2]; assert( i0 < nXYZ && i1 < nXYZ && i2 < nXYZ ); const float p0[3] = {dXYZ[i0*3+0],dXYZ[i0*3+1],dXYZ[i0*3+2]}; const float p1[3] = {dXYZ[i1*3+0],dXYZ[i1*3+1],dXYZ[i1*3+2]}; const float p2[3] = {dXYZ[i2*3+0],dXYZ[i2*3+1],dXYZ[i2*3+2]}; const float pc[3] = { (p0[0]+p1[0]+p2[0])/3.f, (p0[1]+p1[1]+p2[1])/3.f, (p0[2]+p1[2]+p2[2])/3.f }; 
dXYZ_c[itri*3+0] = pc[0]; dXYZ_c[itri*3+1] = pc[1]; dXYZ_c[itri*3+2] = pc[2]; // --------------------- const float l0 = device_Distance3(pc, p0); const float l1 = device_Distance3(pc, p1); const float l2 = device_Distance3(pc, p2); float lm = l0; if( l1 > lm ){ lm = l1; } if( l2 > lm ){ lm = l2; } const unsigned int TPB = 64; assert( blockDim.x == TPB ); __shared__ float sRad[TPB]; const unsigned int s_idx = threadIdx.x; sRad[s_idx] = lm; __syncthreads(); if( s_idx == 0 ) { int ns = TPB; if( blockDim.x * (blockIdx.x+1) > nTri ) { ns = nTri - blockDim.x * blockIdx.x; } float blockRadMax = sRad[0]; for(int ins=1;ins<ns;++ins){ if( sRad[ins] > blockRadMax ){ blockRadMax = sRad[ins]; } } device_AtomicMaxFloat(dMaxRad, blockRadMax); } } void dfm2::cuda::cuda_CentsMaxRad_MeshTri3F( float* hXYZ_c, float* hMaxRad, const float *hXYZ, const unsigned int nXYZ, const unsigned int *hTri, const unsigned int nTri) { const thrust::device_vector<float> dXYZ(hXYZ, hXYZ+nXYZ*3); const thrust::device_vector<unsigned int> dTri(hTri, hTri+nTri*3); thrust::device_vector<float> dMaxRad(1); thrust::device_vector<float> dXYZ_c(nTri*3); { const unsigned int BLOCK = 64; dim3 grid( (nTri-1)/BLOCK + 1 ); dim3 block( BLOCK ); kernel_CentRad_MeshTri3D_TPB64 <<< grid, block >>> ( thrust::raw_pointer_cast(dXYZ_c.data()), thrust::raw_pointer_cast(dMaxRad.data()), thrust::raw_pointer_cast(dXYZ.data()), nXYZ, thrust::raw_pointer_cast(dTri.data()), nTri); } thrust::copy(dXYZ_c.begin(), dXYZ_c.end(), hXYZ_c); thrust::copy(dMaxRad.begin(), dMaxRad.end(), hMaxRad); } // -------------------------------------------------------------------------------- __global__ void kernel_MortonCodeId_Points3F_TPB64( unsigned int *dMC, unsigned int *dId, const float *dXYZ, const unsigned int nXYZ, const float* min_xyz, const float* max_xyz) { const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if( idx >= nXYZ ) return; dId[idx] = idx; // ---------------------------- const float x0 = (dXYZ[idx*3+0]-min_xyz[0])/(max_xyz[0]-min_xyz[0]); const float y0 = (dXYZ[idx*3+1]-min_xyz[1])/(max_xyz[1]-min_xyz[1]); const float z0 = (dXYZ[idx*3+2]-min_xyz[2])/(max_xyz[2]-min_xyz[2]); unsigned int mc = device_MortonCode(x0,y0,z0); dMC[idx] = mc; } void dfm2::cuda::cuda_MortonCode_Points3FSorted( unsigned int *hSortedId, std::uint32_t *hSortedMc, const float *hXYZ, const unsigned int nXYZ, const float* hMinXYZ, const float* hMaxXYZ) { const thrust::device_vector<float> dXYZ(hXYZ, hXYZ+nXYZ*3); thrust::device_vector<unsigned int> dMC(nXYZ); thrust::device_vector<unsigned int> dId(nXYZ); thrust::device_vector<float> dMinXYZ(hMinXYZ,hMinXYZ+6); thrust::device_vector<float> dMaxXYZ(hMaxXYZ,hMaxXYZ+6); { const unsigned int BLOCK = 64; dim3 grid( (nXYZ-1)/BLOCK+1 ); dim3 block( BLOCK ); kernel_MortonCodeId_Points3F_TPB64 <<< grid, block >>> ( thrust::raw_pointer_cast(dMC.data()), thrust::raw_pointer_cast(dId.data()), thrust::raw_pointer_cast(dXYZ.data()), nXYZ, thrust::raw_pointer_cast(dMinXYZ.data()), thrust::raw_pointer_cast(dMaxXYZ.data())); } thrust::sort_by_key(dMC.begin(),dMC.end(),dId.begin()); thrust::copy(dMC.begin(), dMC.end(), hSortedMc); thrust::copy(dId.begin(), dId.end(), hSortedId); } // ------------------------------------------------ __global__ void kernel_MortonCode_BVHTopology_TPB64( dfm2::CNodeBVH2* dNodeBVH, const unsigned int *dSortedMC, const unsigned int *dSortedId, const unsigned int nMC) { const unsigned int idx = blockDim.x * blockIdx.x + threadIdx.x; if (idx >= nMC-1) return; const unsigned int ini = idx; const unsigned int nni = 
nMC-1; // ------------------------------- const int2 range = device_MortonCode_DeterminRange(dSortedMC,nMC,ini); const int isplit = device_MortonCode_FindSplit(dSortedMC,range.x,range.y); // printf("%d --> %d %d %d\n",ini, range.x, range.y, isplit); // ------------------------------- if( range.x == isplit ){ const unsigned int inlA = nni+isplit; dNodeBVH[ini].ichild[0] = inlA; dNodeBVH[inlA].iparent = ini; dNodeBVH[inlA].ichild[0] = dSortedId[isplit]; dNodeBVH[inlA].ichild[1] = UINT_MAX; } else{ const unsigned int iniA = isplit; dNodeBVH[ini].ichild[0] = iniA; dNodeBVH[iniA].iparent = ini; } // ---- if( range.y == isplit+1 ){ const unsigned int inlB = nni+isplit+1; dNodeBVH[ini].ichild[1] = inlB; dNodeBVH[inlB].iparent = ini; dNodeBVH[inlB].ichild[0] = dSortedId[isplit+1]; dNodeBVH[inlB].ichild[1] = UINT_MAX; } else{ const unsigned int iniB = isplit+1; dNodeBVH[ini].ichild[1] = iniB; dNodeBVH[iniB].iparent = ini; } } void dfm2::cuda::cuda_MortonCode_BVHTopology( CNodeBVH2* hNodeBVH, const unsigned int* aSortedId, const std::uint32_t* aSortedMc, unsigned int N) { const thrust::device_vector<std::uint32_t> dMC(aSortedMc,aSortedMc+N); const thrust::device_vector<unsigned int> dId(aSortedId,aSortedId+N); thrust::device_vector<dfm2::CNodeBVH2> dNodeBVH(N*2-1); // ---------------------------------- { const unsigned int BLOCK = 64; dim3 grid( (N-1)/BLOCK+1 ); dim3 block( BLOCK ); kernel_MortonCode_BVHTopology_TPB64 <<< grid, block >>> ( thrust::raw_pointer_cast(dNodeBVH.data()), thrust::raw_pointer_cast(dMC.data()), thrust::raw_pointer_cast(dId.data()), N); } thrust::copy(dNodeBVH.begin(), dNodeBVH.end(), hNodeBVH); hNodeBVH[0].iparent = UINT_MAX; } // ------------------------------------------------------------------------ template <typename BBOX> __global__ void kernel_BVHGeometry( BBOX* dBox, int* dNum, // const dfm2::CNodeBVH2* dNodeBVH, const float* dXYZ, const unsigned int* dTri, unsigned int nTri, float eps) { const unsigned int ino = blockDim.x * blockIdx.x + threadIdx.x; if (ino >= nTri) return; { // make aabb for triangle assert( dNodeBVH[nTri - 1 + ino].ichild[1] == UINT_MAX ); assert( dNodeBVH[nTri - 1 + ino].iparent < nTri-1 ); const int itri = dNodeBVH[nTri - 1 + ino].ichild[0]; assert(itri >= 0 && itri < nTri); const unsigned int i0 = dTri[itri * 3 + 0]; const unsigned int i1 = dTri[itri * 3 + 1]; const unsigned int i2 = dTri[itri * 3 + 2]; const float *p0 = dXYZ + i0 * 3; const float *p1 = dXYZ + i1 * 3; const float *p2 = dXYZ + i2 * 3; dBox[nTri-1+ino].Set_Inactive(); dBox[nTri-1+ino].AddPoint(p0,eps); dBox[nTri-1+ino].AddPoint(p1,eps); dBox[nTri-1+ino].AddPoint(p2,eps); } // ---------------------------------------------------- unsigned int ino0 = dNodeBVH[nTri-1+ino].iparent; while(true){ assert( ino0 < nTri-1 ); //assert( dNodeBVH[ino0].ichild[0] >= 0 ); //assert( dNodeBVH[ino0].ichild[1] >= 0 ); const unsigned int inoc0 = dNodeBVH[ino0].ichild[0]; const unsigned int inoc1 = dNodeBVH[ino0].ichild[1]; assert( dNodeBVH[inoc0].iparent == ino0 ); assert( dNodeBVH[inoc1].iparent == ino0 ); assert( inoc0 < nTri*2-1 ); assert( inoc1 < nTri*2-1 ); const int iflg_old = atomicCAS(dNum+ino0,0,1); if( iflg_old == 0 ){ // let the another branch of the binary tree do the work return; } __threadfence(); // sync global memory // --------------------------------------- dBox[ino0].Set_Inactive(); dBox[ino0].Add(dBox[inoc0]); dBox[ino0].Add(dBox[inoc1]); // ---------------------------------------- if( dNodeBVH[ino0].iparent == UINT_MAX ){ assert(ino0==0); return; } ino0 = dNodeBVH[ino0].iparent; 
} } void dfm2::cuda::cuda_BVHGeometry_AABB3f( dfm2::CBV3_AABB<float>* hAABB, const CNodeBVH2* hNodeBVH, const float* hXYZ, unsigned int nXYZ, const unsigned int* hTri, unsigned int nTri) { const thrust::device_vector<dfm2::CNodeBVH2> dNodeBVH(hNodeBVH, hNodeBVH+2*nTri-1); const thrust::device_vector<float> dXYZ(hXYZ, hXYZ+nXYZ*3); const thrust::device_vector<unsigned int> dTri(hTri, hTri+nTri*3); thrust::device_vector<CudaBV_AABB3<float>> dAABB( 2*nTri-1 ); thrust::device_vector<int> dNum(nTri-1, 0); // ----------------------------- { const unsigned int BLOCK = 512; dim3 grid((nTri - 1) / BLOCK + 1); dim3 block(BLOCK); kernel_BVHGeometry <<< grid, block >>> ( thrust::raw_pointer_cast(dAABB.data()), thrust::raw_pointer_cast(dNum.data()), // thrust::raw_pointer_cast(dNodeBVH.data()), thrust::raw_pointer_cast(dXYZ.data()), thrust::raw_pointer_cast(dTri.data()), nTri, 0.0); } // ------------------------------ cudaMemcpy(hAABB, thrust::raw_pointer_cast(dAABB.data()), sizeof(CudaBV_AABB3<float>)*dAABB.size(), cudaMemcpyDeviceToHost); } template <typename REAL> void dfm2::cuda::cuda_BVHGeometry_Sphere( dfm2::CBV3_Sphere<REAL>* hAABB, const CNodeBVH2* hNodeBVH, const REAL* hXYZ, unsigned int nXYZ, const unsigned int* hTri, unsigned int nTri) { const thrust::device_vector<dfm2::CNodeBVH2> dNodeBVH(hNodeBVH, hNodeBVH+2*nTri-1); const thrust::device_vector<REAL> dXYZ(hXYZ, hXYZ+nXYZ*3); const thrust::device_vector<unsigned int> dTri(hTri, hTri+nTri*3); thrust::device_vector<CudaBV_Sphere<REAL>> dAABB( 2*nTri-1 ); thrust::device_vector<int> dNum(nTri-1, 0); // ----------------------------- { const unsigned int BLOCK = 512; dim3 grid((nTri - 1) / BLOCK + 1); dim3 block(BLOCK); kernel_BVHGeometry <<< grid, block >>> ( thrust::raw_pointer_cast(dAABB.data()), thrust::raw_pointer_cast(dNum.data()), // thrust::raw_pointer_cast(dNodeBVH.data()), thrust::raw_pointer_cast(dXYZ.data()), thrust::raw_pointer_cast(dTri.data()), nTri, 0.0); } // ------------------------------ cudaMemcpy(hAABB, thrust::raw_pointer_cast(dAABB.data()), sizeof(CudaBV_Sphere<REAL>)*dAABB.size(), cudaMemcpyDeviceToHost); } template void dfm2::cuda::cuda_BVHGeometry_Sphere( dfm2::CBV3_Sphere<float>* hAABB, const CNodeBVH2* hNodeBVH, const float* hXYZ, unsigned int nXYZ, const unsigned int* hTri, unsigned int nTri); // ------------------------------------------------------------------------- __device__ void device_BVH_IndPoint_NearestPoint( unsigned int* ip, float* cur_dist, // const float p[3], unsigned int ibvh, const delfem2::CNodeBVH2* aNodeBVH, const CudaBV_Sphere<float>* dBVSphere) { float min0=+1.0, max0=-1.0; dBVSphere[ibvh].Range_DistToPoint(min0,max0,p); // if( max0 < min0 ){ return; } // ibvh is a inactive bvh the children should be inactive too if( *cur_dist > 0 && min0> *cur_dist ){ return; } // current range [min,max] is valid and nearer than [min0,min0]. 
const unsigned int ichild0 = aNodeBVH[ibvh].ichild[0]; const unsigned int ichild1 = aNodeBVH[ibvh].ichild[1]; if( ichild1 == UINT_MAX ){ // leaf assert( min0 == max0 ); // because this is point if( *cur_dist < 0 || max0 < *cur_dist ){ // current range is inactive *cur_dist = max0; *ip = ichild0; } return; } // ------------------ device_BVH_IndPoint_NearestPoint( ip,cur_dist, p, ichild0,aNodeBVH,dBVSphere); device_BVH_IndPoint_NearestPoint( ip,cur_dist, p, ichild1,aNodeBVH,dBVSphere); } template <typename REAL> __global__ void kernel_BVHNearestPoint( unsigned int* dId, // const REAL* dXYZ1, unsigned int nXYZ1, const dfm2::CNodeBVH2* dNodeBVH, unsigned int nNodeBVH, const CudaBV_Sphere<REAL>* dBVSphere) { const unsigned int ip1 = blockDim.x * blockIdx.x + threadIdx.x; if (ip1 >= nXYZ1 ) return; const float p1[3] = {dXYZ1[ip1*3+0], dXYZ1[ip1*3+1], dXYZ1[ip1*3+2]}; float cur_dist = -1; device_BVH_IndPoint_NearestPoint( dId+ip1, &cur_dist, // p1,0,dNodeBVH,dBVSphere); } template <typename REAL> void dfm2::cuda::cuda_BVH_NearestPoint( unsigned int* hInd, // const REAL* hXYZ1, unsigned int nXYZ1, const CNodeBVH2* hNodeBVH0, unsigned int nNodeBVH0, const dfm2::CBV3_Sphere<REAL>* hBVSphere0) { const thrust::device_vector<REAL> dXYZ1(hXYZ1, hXYZ1+nXYZ1*3); const thrust::device_vector<dfm2::CNodeBVH2> dNodeBVH0(hNodeBVH0, hNodeBVH0+nNodeBVH0); const thrust::device_vector<CudaBV_Sphere<REAL>> dBVSphere0(nNodeBVH0); cudaMemcpy( (void*)thrust::raw_pointer_cast(dBVSphere0.data()), hBVSphere0, sizeof(CudaBV_Sphere<REAL>)*nNodeBVH0, cudaMemcpyHostToDevice); thrust::device_vector<unsigned int> dInd(nXYZ1, 0); // ----------------------------- { cudaDeviceSetLimit(cudaLimitStackSize, 4096); const unsigned int BLOCK = 512; dim3 grid((nXYZ1 - 1) / BLOCK + 1); dim3 block(BLOCK); kernel_BVHNearestPoint << < grid, block >> > ( thrust::raw_pointer_cast(dInd.data()), // thrust::raw_pointer_cast(dXYZ1.data()), nXYZ1, thrust::raw_pointer_cast(dNodeBVH0.data()), nNodeBVH0, thrust::raw_pointer_cast(dBVSphere0.data())); } thrust::copy(dInd.begin(), dInd.end(), hInd); } template void dfm2::cuda::cuda_BVH_NearestPoint( unsigned int* hInd, // const float* hXYZ1, unsigned int nXYZ1, const CNodeBVH2* hNodeBVH0, unsigned int nNodeBVH0, const dfm2::CBV3_Sphere<float>* hBVSphere0);
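Read end to end, the entry points in this translation unit implement a standard LBVH build for a triangle mesh: triangle centers, a bounding box of those centers, sorted Morton codes, a binary-radix tree, and bottom-up bounding volumes. A minimal host-side sketch of that call sequence follows; the ordering and the use of triangle centers as the Morton-code input are inferred from the signatures above rather than taken from delfem2's own tests, and the sketch assumes "cu_bvh.h" (plus whatever delfem2 headers it pulls in) declares CNodeBVH2, CBV3_AABB and these cuda_* functions. The bound arrays are sized 6 because cuda_MortonCode_Points3FSorted, as written above, copies six floats from each of hMinXYZ/hMaxXYZ even though only the first three are used.

// Minimal sketch (not from the original sources): chaining the cuda_* entry points
// above to build an LBVH over a triangle mesh on the GPU.
#include <cstdint>
#include <vector>
#include "cu_bvh.h"   // assumed to declare the cuda_* functions and the delfem2 BVH types

namespace dfm2 = delfem2;

void BuildLBVH_Sketch(                               // hypothetical helper, not in delfem2
    std::vector<dfm2::CNodeBVH2>& aNodeBVH,          // out: 2*nTri-1 nodes
    std::vector<dfm2::CBV3_AABB<float>>& aAABB,      // out: one box per node
    const std::vector<float>& aXYZ,                  // in: nXYZ*3 vertex coordinates
    const std::vector<unsigned int>& aTri)           // in: nTri*3 vertex indices
{
  const unsigned int nXYZ = static_cast<unsigned int>(aXYZ.size() / 3);
  const unsigned int nTri = static_cast<unsigned int>(aTri.size() / 3);
  // triangle centers (the max center-to-vertex distance is not needed below)
  std::vector<float> aXYZ_c(nTri * 3);
  float max_rad = 0.f;
  dfm2::cuda::cuda_CentsMaxRad_MeshTri3F(
      aXYZ_c.data(), &max_rad, aXYZ.data(), nXYZ, aTri.data(), nTri);
  // bounds of the centers, used to normalize the Morton-code input
  float bbmin[6] = {0.f}, bbmax[6] = {0.f};
  dfm2::cuda::cuda_Min3Max3_Points3F(bbmin, bbmax, aXYZ_c.data(), nTri);
  // Morton codes of the centers, sorted by key, with the matching triangle ids
  std::vector<unsigned int> aSortedId(nTri);
  std::vector<std::uint32_t> aSortedMc(nTri);
  dfm2::cuda::cuda_MortonCode_Points3FSorted(
      aSortedId.data(), aSortedMc.data(), aXYZ_c.data(), nTri, bbmin, bbmax);
  // binary-radix tree over the sorted codes (nTri leaves, nTri-1 internal nodes)
  aNodeBVH.resize(nTri * 2 - 1);
  dfm2::cuda::cuda_MortonCode_BVHTopology(
      aNodeBVH.data(), aSortedId.data(), aSortedMc.data(), nTri);
  // bottom-up axis-aligned boxes for every node
  aAABB.resize(nTri * 2 - 1);
  dfm2::cuda::cuda_BVHGeometry_AABB3f(
      aAABB.data(), aNodeBVH.data(), aXYZ.data(), nXYZ, aTri.data(), nTri);
}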
4919e56601a9719e354d3917959b3e870e287441.hip
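The hipified FDTD source below keeps two CUDA-only headers that hipify does not translate, "device_launch_parameters.h" and <cudaProfiler.h>, alongside duplicated HIP runtime includes; the hipProfilerStart()/hipProfilerStop() calls that would need a profiler header are commented out in UPDATE_B(). A trimmed include block that should be enough for a ROCm build of this file is sketched here; this is an assumption rather than part of the generated file, and "extern_var.h" is the project's own header.

// Sketch only (assumption, not produced by hipify): a deduplicated include block for the file below.
// hip/hip_runtime.h provides the kernel-launch machinery and hip/hip_runtime_api.h the runtime API,
// which also declares the deprecated hipProfilerStart/hipProfilerStop referenced in the commented-out
// lines (worth checking against the installed HIP version).
#include <stdio.h>
#include <stdlib.h>
#include <math.h>
#include "extern_var.h"
#include <hip/hip_runtime.h>
#include <hip/hip_runtime_api.h>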
// !!! This is a file automatically generated by hipify!!! #include <stdio.h> #include <stdlib.h> #include <math.h> #include "extern_var.h" #include "hip/hip_runtime_api.h" #include <hip/hip_runtime.h> // #include <hip/hip_runtime.h> // #include <device_launch_parameters.h> //#include<conio.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <hip/hip_runtime_api.h> #include <cudaProfiler.h> // __device__ int ThreeDMapD(int i,int j,int k,int SizeZ,int SizeY){ // int num = k + SizeZ*j +SizeY*SizeZ*i; // return num; // } // // // __device__ int FourDMapD(int i,int j,int k,int n,int SizeN,int SizeZ,int SizeY){ // int num = n + SizeN*( k + SizeZ*j +SizeY*SizeZ*i); // return num; // } // // __device__ int TwoDMapD(int i,int j,int size){ // int num = j + i*size; // return num; // } //update B-field void UPDATE_B(){ // if(TEz && polar_psi==0){ // UPDATE_hx(); // UPDATE_hz(); // } // else if(TMz && polar_psi==0){ // UPDATE_hy(); // } //else{ int Number; int threadsPerBlock = 256; Number = NCELLX * NCELLY *NCELLZ; int blocksPerGrid = Number/threadsPerBlock + 1; // hipProfilerStart() ; // // UPDATE_hx <<<blocksPerGrid, threadsPerBlock>>> (hx,ez,ey,Chxh,Chxe,psi_Hx_z_N,psi_Hx_z_F,psi_Hx_y_N,psi_Hx_y_F,khdy,khdz,bh_z_N,bh_z_F,ch_z_N,ch_z_F,bh_y_N,bh_y_F,ch_y_N,ch_y_F,NCELLX,NCELLY,NCELLZ,Periodic_XY,dx,dy,dz,dt,cpml_N_Z,cpml_F_Z,cpml_N_Y,cpml_F_Y,cpml_z_lim,cpml_y_lim,cpml_x_lim,NcpmlZ,NcpmlY); // UPDATE_hy <<<blocksPerGrid, threadsPerBlock>>> (hy,ez,ex,Chyh,Chye,psi_Hy_z_N,psi_Hy_z_F,psi_Hy_x_N,psi_Hy_x_F,khdx,khdz,bh_z_N,bh_z_F,ch_z_N,ch_z_F,bh_x_N,bh_x_F,ch_x_N,ch_x_F,NCELLX,NCELLY,NCELLZ,Periodic_XY,dx,dy,dz,dt,cpml_N_Z,cpml_F_Z,cpml_N_X,cpml_F_X,cpml_z_lim,cpml_y_lim,cpml_x_lim,NcpmlZ,NcpmlX); // UPDATE_hz <<<blocksPerGrid, threadsPerBlock>>> (hz,ey,ex,Chzh,Chze,psi_Hz_x_N,psi_Hz_x_F,psi_Hz_y_N,psi_Hz_y_F,khdx,khdy,bh_x_N,bh_x_F,ch_x_N,ch_x_F,bh_y_N,bh_y_F,ch_y_N,ch_y_F,NCELLX,NCELLY,NCELLZ,Periodic_XY,dx,dy,dz,dt,cpml_N_X,cpml_F_X,cpml_N_Y,cpml_F_Y,cpml_z_lim,cpml_y_lim,cpml_x_lim,NcpmlY,NcpmlX); hipDeviceSynchronize(); // hipProfilerStop() ; // UPDATE_hz(); // } } //update E-field void UPDATE_E(){ int i,j,k,n; comp hold; int Number; int threadsPerBlock = 256; Number = NCELLX * NCELLY *NCELLZ; int blocksPerGrid = Number/threadsPerBlock + 1; // // // UPDATE_ex(); // UPDATE_ex <<<blocksPerGrid, threadsPerBlock>>> (ex,ex_n,ex_n_1,hy,hz,Cexe,Cexh,kedy,kedz,mat_matrix,mat_matrixX,first_medium_max,psi_Ex_z_N,psi_Ex_z_F,psi_Ex_y_N,psi_Ex_y_F,Px_cp,Px_cp_n,Px_cp_n_1,Px_d,Px_d_n,Px_d_n_1, // C_1_cp,C_2_cp,C_3_cp,C_4_cp,C_5_cp,d_1_d,d_2_d,d_3_d,d_4_d,d_5_d,d_NL,C_E,z0,N_CP_poles,N_drude_poles,ce_z_N,ce_z_F,be_z_N,be_z_F,ce_y_N,ce_y_F,be_y_N,be_y_F,dx,dy,dz,dt,NCELLX,NCELLY,NCELLZ, // Hydrodynamics,cpml_x_lim,cpml_y_lim,cpml_z_lim,cpml_N_Y,cpml_F_Y,cpml_N_Z,cpml_F_Z,NcpmlY,NcpmlZ, C_E_1,C_E_2,Periodic_XY); // //UPDATE_ey(); // UPDATE_ey <<<blocksPerGrid, threadsPerBlock>>> (ey,ey_n,ey_n_1,hx,hz,Ceye,Ceyh,kedx,kedz,mat_matrix,mat_matrixY,first_medium_max,psi_Ey_z_N,psi_Ey_z_F,psi_Ey_x_N,psi_Ey_x_F,Py_cp,Py_cp_n,Py_cp_n_1,Py_d,Py_d_n,Py_d_n_1, // C_1_cp,C_2_cp,C_3_cp,C_4_cp,C_5_cp,d_1_d,d_2_d,d_3_d,d_4_d,d_5_d,d_NL,C_E,z0,N_CP_poles,N_drude_poles,ce_z_N,ce_z_F,be_z_N,be_z_F,ce_y_N,ce_x_F,be_x_N,be_x_F,dx,dy,dz,dt,NCELLX,NCELLY,NCELLZ, // Hydrodynamics,cpml_x_lim,cpml_y_lim,cpml_z_lim,cpml_N_X,cpml_F_X,cpml_N_Z,cpml_F_Z,NcpmlX,NcpmlZ, C_E_1,C_E_2,Periodic_XY); // // // UPDATE_ez(); // UPDATE_ez <<<blocksPerGrid, threadsPerBlock>>> 
(ez,ez_n,ez_n_1,hx,hy,Ceze,Cezh,kedx,kedy,mat_matrix,mat_matrixZ,first_medium_max,psi_Ez_y_N,psi_Ez_y_F,psi_Ez_x_N,psi_Ez_x_F,Pz_cp,Pz_cp_n,Pz_cp_n_1,Pz_d,Pz_d_n,Pz_d_n_1, // C_1_cp,C_2_cp,C_3_cp,C_4_cp,C_5_cp,d_1_d,d_2_d,d_3_d,d_4_d,d_5_d,d_NL,C_E,z0,N_CP_poles,N_drude_poles,ce_y_N,ce_y_F,be_y_N,be_y_F,ce_x_N,ce_x_F,be_x_N,be_x_F,dx,dy,dz,dt,NCELLX,NCELLY,NCELLZ, // Hydrodynamics,cpml_x_lim,cpml_y_lim,cpml_z_lim,cpml_N_X,cpml_F_X,cpml_N_Y,cpml_F_Y,NcpmlX,NcpmlY, C_E_1,C_E_2,Periodic_XY); // hipDeviceSynchronize(); // for(i=0;i<NCELLX;i++){ // for(j=0;j<NCELLY;j++){ // printf("%e\t",Px_d[i][j][dispersive_slab+20][0]); // //printf("%e\t",hx[i][j][dispersive_slab+20]); // } // printf("\n"); // } ////#pragma omp parallel for collapse(3) private(i,j,k,n) //// schedule(guided) for(i=0;i<NCELLX;i++){ for(j=0;j<NCELLY;j++){ for(k=0;k<NCELLZ;k++){ if(mat_matrixX[ThreeDMap(i,j,k,NCELLZ,NCELLY)] < 6 || mat_matrixY[ThreeDMap(i,j,k,NCELLZ,NCELLY)] <6 || mat_matrixZ[ThreeDMap(i,j,k,NCELLZ,NCELLY)] <6 || mat_matrix[ThreeDMap(i,j,k,NCELLZ,NCELLY)] <6){ for(n=0;n<N_drude_poles;n++){ Px_d_n_1[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)] = Px_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; Px_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)] = Px_d[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; Py_d_n_1[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)] = Py_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; Py_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)] = Py_d[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; Pz_d_n_1[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)] = Pz_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; Pz_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)] = Pz_d[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; } for(n=0;n<N_CP_poles;n++){ Px_cp_n_1[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)] = Px_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; Px_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)] = Px_cp[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; Py_cp_n_1[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)] = Py_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; Py_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)] = Py_cp[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; Pz_cp_n_1[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)] = Pz_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; Pz_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)] = Pz_cp[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; } if(Hydrodynamics){ hxPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] = hx[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; hyPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] = hy[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; hzPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] = hz[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; hold = NDx[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; NDx[ThreeDMap(i,j,k,NCELLZ,NCELLY)] = NDx_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; NDx_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=hold; hold = NDy[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; NDy[ThreeDMap(i,j,k,NCELLZ,NCELLY)] = NDy_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; NDy_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=hold; hold = NDz[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; NDz[ThreeDMap(i,j,k,NCELLZ,NCELLY)] = NDz_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; NDz_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=hold; } } } } } } // // __global__ void UPDATE_ex(real *ex,real *ex_n,real *ex_n_1,real *hy,real *hz,real *Cexe,real *Cexh,real *kedy,real *kedz,int *mat_matrix,int *mat_matrixX,int first_medium_max,real *psi_Ex_z_N, // real *psi_Ex_z_F,real *psi_Ex_y_N,real *psi_Ex_y_F,real *Px_cp,real *Px_cp_n,real *Px_cp_n_1,real *Px_d,real *Px_d_n,real *Px_d_n_1,real *C_1_cp,real 
*C_2_cp,real *C_3_cp,real *C_4_cp,real *C_5_cp,real *d_1_d, // real *d_2_d,real *d_3_d,real *d_4_d,real *d_5_d,real *d_NL,real C_E,real z0,int N_CP_poles,int N_drude_poles,real *ce_z_N,real *ce_z_F,real *be_z_N,real *be_z_F,real *ce_y_N,real *ce_y_F,real *be_y_N,real *be_y_F, // real dx,real dy,real dz,real dt,int NCELLX,int NCELLY,int NCELLZ,int Hydrodynamics,int cpml_x_lim,int cpml_y_lim,int cpml_z_lim,int cpml_N_Y,int cpml_F_Y,int cpml_N_Z,int cpml_F_Z,int NcpmlY,int NcpmlZ,real C_E_1,real C_E_2,int Periodic_XY){ // int i,j,k,n,k2,j2; // comp Curl_H, Div_Grad=0.0,J_T,dummy_var; // comp Vx1,Vx2,Vy1,Vy2,Vz1,Vz2,Nx1,Nx2,Ny1,Ny2,Nz1,Nz2; // comp C_P_1,C_P_2,C_P_3,C_P_4,C_P_NL; // double INV_DX = 1.0/dx; // double INV_DY = 1.0/dy; // double INV_DZ = 1.0/dz; // // int idx = blockDim.x * blockIdx.x + threadIdx.x; // // i = idx / (NCELLZ*NCELLY); // j = (idx - i*NCELLZ*NCELLY) / NCELLZ; // k = idx - i*NCELLZ*NCELLY - j*NCELLZ; // // if(Periodic_XY){ // ////#pragma omp parallel for collapse(3) private(Curl_H,i,j,j2,k,k2,n,dummy_var,J_T,Div_Grad) // schedule(static) // // for(k=1;k<NCELLZ-1;k++){ // // for(i=0;i<NCELLX;i++){ // // for(j=0;j<NCELLY;j++){ // if(i<NCELLX && j<NCELLY && k>0 && k<(NCELLZ-1)){ // //for(k=1;k<NCELLZ-1;k++){ // if(j==0){ // #ifdef DOUBLECOMPLEX // Curl_H=(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i,NCELLY-1,k,NCELLZ,NCELLY)]*cexp(I*k_y*period_y))/kedy[j]-(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/kedz[k]; // #endif // #ifdef DOUBLEPRECISION // Curl_H=(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i,NCELLY-1,k,NCELLZ,NCELLY)])/kedy[j]-(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/kedz[k]; // #endif // // } // else{ // Curl_H=(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/kedy[j]-(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/kedz[k]; // } // // if(mat_matrix[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max && mat_matrix[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] < 6){ // // // // Div_Grad = Calc_DIV_GRADx(i,j,k); // Div_Grad = 0.0; // // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // // for(n=0;n<N_drude_poles;n++){ // C_P_1+=(d_1_d[n]-1)*Px_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_3+=(d_2_d[n])*Px_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_NL += d_NL[n]*Div_Grad; // } // for(n=0;n<N_CP_poles;n++){ // C_P_2+=(C_1_cp[n]-1)*Px_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // C_P_4+=(C_2_cp[n])*Px_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // } // ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_E_2*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4 - C_P_NL); // // // //printf("%e\n",Div_Grad); // //Z-CPML // if(k<cpml_N_Z+1 && i<cpml_x_lim && j<cpml_y_lim){ // //Near-Z-PML // psi_Ex_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]=be_z_N[k]*psi_Ex_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]+ce_z_N[k]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ex_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]; // } // if(k>=cpml_F_Z && i<cpml_x_lim && j<cpml_y_lim){ // k2 = k - cpml_F_Z ; // 
psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]=be_z_F[k2]*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]+ce_z_F[k2]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=(1/C_E)*dt*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]/z0; // } // // for(n=0;n<N_CP_poles;n++){ // Px_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Px_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Px_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // // for(n=0;n<N_drude_poles;n++){ // Px_d[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]=d_1_d[n]*Px_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_2_d[n]*Px_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_3_d[n]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_4_d[n]*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_5_d[n]*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] + d_NL[n]*Div_Grad; // // } // // } // // else{ // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Cexe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_H; // //Z-CPML // if(k<cpml_N_Z+1 && i<cpml_x_lim && j<cpml_y_lim){ // //Near-Z-PML // psi_Ex_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=be_z_N[k]*psi_Ex_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+ce_z_N[k]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ex_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // if(k>=cpml_F_Z && i<cpml_x_lim && j<cpml_y_lim){ // k2 = k - cpml_F_Z ; // psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]=be_z_F[k2]*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]+ce_z_F[k2]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]; // } // } // } // // // // // } // // } // // } // } // //No PBCs // else{ // ////#pragma omp target teams distribute parallel for collapse(3) schedule(static,1) private(Curl_H,i,j,j2,k,k2,n,dummy_var,J_T,Div_Grad) // // //#pragma omp parallel for collapse(3) private(Curl_H,i,j,j2,k,k2,n,dummy_var,J_T,Div_Grad) // schedule(static) // // for(i=0;i<NCELLX-1;i++){ // // for(j=1;j<NCELLY-1;j++){ // // for(k=1;k<NCELLZ-1;k++){ // if(i<(NCELLX-1) && j>0 && j<(NCELLY-1) && k>0 && k<(NCELLZ-1)){ // Curl_H=(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/kedy[j]-(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/kedz[k]; // // if(mat_matrixX[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max && mat_matrixX[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] < 6){ // // // if(Hydrodynamics == 0) // { // //Div_Grad = Calc_DIV_GRADx(i,j,k); // Div_Grad = 0.0; // // CP_D_ex(i,j,k,Curl_H,Div_Grad); // // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // // for(n=0;n<N_drude_poles;n++){ // C_P_1+=(d_1_d[n]-1)*Px_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_3+=(d_2_d[n])*Px_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_NL += d_NL[n]*Div_Grad; // } // for(n=0;n<N_CP_poles;n++){ // C_P_2+=(C_1_cp[n]-1)*Px_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // C_P_4+=(C_2_cp[n])*Px_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // } // ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // 
ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_E_2*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4 - C_P_NL); // // // // // for(n=0;n<N_CP_poles;n++){ // Px_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Px_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Px_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // // for(n=0;n<N_drude_poles;n++){ // Px_d[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]=d_1_d[n]*Px_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_2_d[n]*Px_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_3_d[n]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_4_d[n]*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_5_d[n]*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] + d_NL[n]*Div_Grad; // // } // } // // else{ // // // // Vx1 = Px_d_n[FourDMapD(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]; // // Vx2 = Px_d_n[FourDMapD(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]; // // Nx1 = NDx_prev[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]; // // Nx2 = NDx_prev[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)]; // // // // Vy1 = 0.5*(Py_d_n[FourDMapD(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vy2 = 0.5*(Py_d_n[FourDMapD(i+1,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMapD(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Ny1 = 0.5*(NDy_prev[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)] + NDy_prev[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]); // // Ny2 = 0.5*(NDy_prev[ThreeDMapD(i+1,j-1,k,NCELLZ,NCELLY)] + NDy_prev[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)]); // // // // Vz1 = 0.5*(Pz_d_n[FourDMapD(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vz2 = 0.5*(Pz_d_n[FourDMapD(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMapD(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); // // Nz1 = 0.5*(NDz_prev[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)] + NDz_prev[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]); // // Nz2 = 0.5*(NDz_prev[ThreeDMapD(i+1,j,k-1,NCELLZ,NCELLY)] + NDz_prev[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)]); // // // // NDx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] = NDx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] - 2.0*dt*INV_DX*(0.5*(Nx1*Vx1-Nx2*Vx2) + (Ny1*Vy1-Ny2*Vy2) + (Nz1*Vz1-Nz2*Vz2) + (0.5*(Vx1-Vx2) + (Vy1-Vy2) + (Vz1-Vz2))*N_EQ); // // // // // // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // // // // // // for(n=0;n<N_CP_poles;n++){ // // C_P_2+=(C_1_cp[n]-1)*Px_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // // C_P_4+=(C_2_cp[n])*Px_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // // } // // ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0 + dt*Px_d[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*e0*(N_EQ + NDx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]) -C_E_2*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_2-C_P_4); // // // // for(n=0;n<N_CP_poles;n++){ // // Px_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Px_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Px_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // 
// // // // // } // // // printf("%e\n",Div_Grad); // //Z-CPML // // if(k<cpml_N_Z+1 && i<cpml_x_lim && j<cpml_y_lim){ // // //Near-Z-PML // // psi_Ex_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=be_z_N[k]*psi_Ex_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+ce_z_N[k]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ex_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // // if(k>=cpml_F_Z && i<cpml_x_lim && j<cpml_y_lim){ // // k2 = k - cpml_F_Z ; // // psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]=be_z_F[k2]*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]+ce_z_F[k2]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=(1/C_E)*dt*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]/z0; // // } // // // // } // // else{ // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Cexe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_H; // } // //Z-CPML // if(k<cpml_N_Z+1 && i<cpml_x_lim && j<cpml_y_lim){ // //Near-Z-PML // psi_Ex_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]=be_z_N[k]*psi_Ex_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]+ce_z_N[k]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ex_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]; // } // if(k>=cpml_F_Z && i<cpml_x_lim && j<cpml_y_lim){ //Far Z PML // k2 = k - cpml_F_Z; // psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]=be_z_F[k2]*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]+ce_z_F[k2]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // // if(mat_matrixX[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max){ // // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=(1/C_E)*dt*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]/z0; // // } // // else{ // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]; // //} // } // //Y PML // if(j<cpml_N_Y+1 && i<cpml_x_lim && k<cpml_z_lim){ //Near Y PML // psi_Ex_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY+1)]=be_y_N[j]*psi_Ex_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY+1)]+ce_y_N[j]*(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/dy; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ex_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY+1)]; // } // if(j>=cpml_F_Y && i<cpml_x_lim && k<cpml_z_lim){ //Far Y PML // j2 = j - cpml_F_Y; // psi_Ex_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY+1)]=be_y_F[j2]*psi_Ex_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY+1)]+ce_y_F[j2]*(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/dy; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ex_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY+1)]; // } // // if(mat_matrixX[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max){ // // for(n=0;n<N_CP_poles;n++){ // // Px_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Px_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Px_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // // for(n=0;n<N_drude_poles;n++){ // // 
Px_d[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=d_1_d[n]*Px_d_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+d_2_d[n]*Px_d_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+d_3_d[n]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_4_d[n]*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_5_d[n]*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // // } // } // // } // // } // // } // } // // return; // } // // // // // // // __global__ void UPDATE_ey(real *ey,real *ey_n,real *ey_n_1,real *hx,real *hz,real *Ceye,real *Ceyh,real *kedx,real *kedz,int *mat_matrix,int *mat_matrixY,int first_medium_max,real *psi_Ey_z_N, // real *psi_Ey_z_F,real *psi_Ey_x_N,real *psi_Ey_x_F,real *Py_cp,real *Py_cp_n,real *Py_cp_n_1,real *Py_d,real *Py_d_n,real *Py_d_n_1,real *C_1_cp,real *C_2_cp,real *C_3_cp,real *C_4_cp,real *C_5_cp,real *d_1_d, // real *d_2_d,real *d_3_d,real *d_4_d,real *d_5_d,real *d_NL,real C_E,real z0,int N_CP_poles,int N_drude_poles,real *ce_z_N,real *ce_z_F,real *be_z_N,real *be_z_F,real *ce_x_N,real *ce_x_F,real *be_x_N,real *be_x_F, // real dx,real dy,real dz,real dt,int NCELLX,int NCELLY,int NCELLZ,int Hydrodynamics,int cpml_x_lim,int cpml_y_lim,int cpml_z_lim,int cpml_N_X,int cpml_F_X,int cpml_N_Z,int cpml_F_Z,int NcpmlX,int NcpmlZ,real C_E_1,real C_E_2,int Periodic_XY){ // int i,j,k,i2,k2,n; // comp Curl_H,Div_Grad=0.0,dummy_var,J_T; // comp C_P_1,C_P_2,C_P_3,C_P_4,C_P_NL; // double INV_DX = 1.0/dx; // double INV_DY = 1.0/dy; // double INV_DZ = 1.0/dz; // comp Vx1,Vx2,Vy1,Vy2,Vz1,Vz2,Nx1,Nx2,Ny1,Ny2,Nz1,Nz2; // int idx = blockDim.x * blockIdx.x + threadIdx.x; // // i = idx / (NCELLZ*NCELLY); // j = (idx - i*NCELLZ*NCELLY) / NCELLZ; // k = idx - i*NCELLZ*NCELLY - j*NCELLZ; // // if(Periodic_XY){ // ////#pragma omp parallel for collapse(3) private(Curl_H,i,j,i2,k,k2,n,dummy_var,J_T,Div_Grad) // schedule(static) // // for(k=0;k<NCELLZ-1;k++){ // // for(i=0;i<NCELLX;i++){ // // for(j=0;j<NCELLY;j++){ // if(i<NCELLX && j<NCELLY && k>0 && k<(NCELLZ-1)){ // // for(k=1;k<NCELLZ-1;k++){ // if(i==0){ // #ifdef DOUBLECOMPLEX // Curl_H=(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/kedz[k]-(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(NCELLX-1,j,k,NCELLZ,NCELLY)]*cexp(I*period_x*k_x))/kedx[i]; // #endif // #ifdef DOUBLEPRECISION // Curl_H=(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/kedz[k]-(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(NCELLX-1,j,k,NCELLZ,NCELLY)])/kedx[i]; // #endif // //printf("%d,%d,%d \t %f\t%f\t%f\n",i,j,k,creal(Curl_H),creal(hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)]), creal(hz[ThreeDMapD(NCELLX-1,j,k,NCELLZ,NCELLY)])); // // } // // else{ // Curl_H=(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/kedz[k]-(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/kedx[i]; // } // if(mat_matrixY[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max && mat_matrixY[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] <6){ // // //Div_Grad = Calc_DIV_GRADy(i,j,k); // Div_Grad = 0.0; // // printf("%e\n",d_NL[0]*Div_Grad); // // CP_D_ey(i,j,k,Curl_H,Div_Grad); // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // //printf("here"); // for(n=0;n<N_drude_poles;n++){ // C_P_1+=(d_1_d[n]-1.0)*Py_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_3+=(d_2_d[n])*Py_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_NL += d_NL[n]*Div_Grad; // } // for(n=0;n<N_CP_poles;n++){ // C_P_2+=(C_1_cp[n]-1.0)*Py_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // 
C_P_4+=(C_2_cp[n])*Py_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // } // ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_E_2*ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4-C_P_NL); // // // // printf("%e\n",ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]); // //Z-CPML // if(k<cpml_N_Z+1 && i<cpml_x_lim && j<cpml_y_lim){ // //Here we are in the near Z-PML // psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]=be_z_N[k]*psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]+ce_z_N[k]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]; // } // if(k>=cpml_F_Z && i<cpml_x_lim && j<cpml_y_lim){ // //Here we are in the far Z-PML // k2 = k - cpml_F_Z ; // psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]=be_z_F[k2]*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]+ce_z_F[k2]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=(1/C_E)*dt*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]/z0; // } // // for(n=0;n<N_CP_poles;n++){ // Py_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Py_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Py_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // // //if(Hydrodynamics == 0){ // for(n=0;n<N_drude_poles;n++){ // Py_d[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]=d_1_d[n]*Py_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_2_d[n]*Py_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_3_d[n]*ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_4_d[n]*ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_5_d[n]*ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] + d_NL[n]*Div_Grad; // // } // //} // // } // // else{ // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Ceye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_H; // //Z-CPML // if(k<cpml_N_Z+1 && i<cpml_x_lim && j<cpml_y_lim){ // //Here we are in the near Z-PML // psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]=be_z_N[k]*psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]+ce_z_N[k]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]; // } // if(k>=cpml_F_Z && i<cpml_x_lim && j<cpml_y_lim){ // //Here we are in the far Z-PML // k2 = k - cpml_F_Z ; // psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]=be_z_F[k2]*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]+ce_z_F[k2]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // if(mat_matrix[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max){ // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=(1/C_E)*dt*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]/z0; // } // else{ // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]; // } // } // } // // } // // // } // // } // // } // } // // else{ // // //#pragma omp parallel for collapse(3) private(Curl_H,i,j,i2,k,k2,n,dummy_var,J_T,Div_Grad) // schedule(static) // // 
for(i=1;i<NCELLX-1;i++){ // // for(j=0;j<NCELLY-1;j++){ // // for(k=1;k<NCELLZ-1;k++){ // if(i>0 && i<(NCELLX-1) && j<(NCELLY-1) && k>0 && k<(NCELLZ-1)){ // Curl_H=(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/kedz[k]-(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/kedx[i]; // if(mat_matrixY[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max && mat_matrixY[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] <6){ // // if(Hydrodynamics == 0){ // //Div_Grad = Calc_DIV_GRADy(i,j,k); // Div_Grad= 0.0; // // printf("%e\n",d_NL[0]*Div_Grad); // // CP_D_ey(i,j,k,Curl_H,Div_Grad); // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // //printf("here"); // for(n=0;n<N_drude_poles;n++){ // C_P_1+=(d_1_d[n]-1.0)*Py_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_3+=(d_2_d[n])*Py_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_NL += d_NL[n]*Div_Grad; // } // for(n=0;n<N_CP_poles;n++){ // C_P_2+=(C_1_cp[n]-1.0)*Py_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // C_P_4+=(C_2_cp[n])*Py_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // } // ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_E_2*ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4-C_P_NL); // // // printf("%e\n",ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]); // //Z-CPML // // if(k<cpml_N_Z+1 && i<cpml_x_lim && j<cpml_y_lim){ // // //Here we are in the near Z-PML // // psi_Ey_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=be_z_N[k]*psi_Ey_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+ce_z_N[k]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ey_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // // if(k>=cpml_F_Z && i<cpml_x_lim && j<cpml_y_lim){ // // //Here we are in the far Z-PML // // k2 = k - cpml_F_Z ; // // psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]=be_z_F[k2]*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]+ce_z_F[k2]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=(1/C_E)*dt*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]/z0; // // } // // for(n=0;n<N_CP_poles;n++){ // Py_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Py_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Py_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // // for(n=0;n<N_drude_poles;n++){ // Py_d[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]=d_1_d[n]*Py_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_2_d[n]*Py_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_3_d[n]*ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_4_d[n]*ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_5_d[n]*ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] + d_NL[n]*Div_Grad; // // } // // } // // else{ // // // // Vy1 = Py_d_n[FourDMapD(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]; // // Vy2 = Py_d_n[FourDMapD(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]; // // Ny1 = NDy_prev[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]; // // Ny2 = NDy_prev[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)]; // // // // Vx1 = 0.5*(Px_d_n[FourDMapD(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // 
Vx2 = 0.5*(Px_d_n[FourDMapD(i-1,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMapD(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Nx1 = 0.5*(NDx_prev[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)] + NDx_prev[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]); // // Nx2 = 0.5*(NDx_prev[ThreeDMapD(i-1,j+1,k,NCELLZ,NCELLY)] + NDx_prev[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)]); // // // // Vz1 = 0.5*(Pz_d_n[FourDMapD(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vz2 = 0.5*(Pz_d_n[FourDMapD(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMapD(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); // // Nz1 = 0.5*(NDz_prev[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)] + NDz_prev[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]); // // Nz2 = 0.5*(NDz_prev[ThreeDMapD(i,j+1,k-1,NCELLZ,NCELLY)] + NDz_prev[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)]); // // // // NDy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] = NDy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] - 2.0*dt*INV_DX*((Nx1*Vx1-Nx2*Vx2) + 0.5*(Ny1*Vy1-Ny2*Vy2) + (Nz1*Vz1-Nz2*Vz2) + ((Vx1-Vx2) + 0.5*(Vy1-Vy2) + (Vz1-Vz2))*N_EQ); // // // // // // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // // // // for(n=0;n<N_CP_poles;n++){ // // C_P_2+=(C_1_cp[n]-1.0)*Py_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // // C_P_4+=(C_2_cp[n])*Py_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // // } // // ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0 + dt*Py_d[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*e0*(N_EQ + NDy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]) + C_E_1*ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_E_2*ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_2-C_P_4); // // for(n=0;n<N_CP_poles;n++){ // // Py_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Py_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Py_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // // // // } // } // // // else{ // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Ceye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_H; // // } // // if(k<cpml_N_Z+1 && i<cpml_x_lim && j<cpml_y_lim){ // //Here we are in the near Z-PML // psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]=be_z_N[k]*psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]+ce_z_N[k]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]; // } // if(k>=cpml_F_Z && i<cpml_x_lim && j<cpml_y_lim){ // //Here we are in the far Z-PML // k2 = k - cpml_F_Z; // psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]=be_z_F[k2]*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]+ce_z_F[k2]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // // if(mat_matrix[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max){ // // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=(1/C_E)*dt*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]/z0; // // } // // else{ // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]; // //} // } // //X-CPML // if(i<cpml_N_X+1 && j<cpml_y_lim && k<cpml_z_lim){ // //Here we are in the near-X-PML // 
psi_Ey_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=be_x_N[i]*psi_Ey_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+ce_x_N[i]*(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/dx; // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ey_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // if(i>=cpml_F_X && j<cpml_y_lim && k<cpml_z_lim){ // //Here we are in the far-X-PML // i2 = i - cpml_F_X; // psi_Ey_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]=be_x_F[i2]*psi_Ey_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]+ce_x_F[i2]*(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/dx; // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ey_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]; // } // // // } // // } // // } // // // // } // // } // // return; // } // // // //void UPDATE_ez(void){ // __global__ void UPDATE_ez(real *ez,real *ez_n,real *ez_n_1,real *hx,real *hy,real *Ceze,real *Cezh,real *kedx,real *kedy,int *mat_matrix,int *mat_matrixZ,int first_medium_max,real *psi_Ez_y_N, // real *psi_Ez_y_F,real *psi_Ez_x_N,real *psi_Ez_x_F,real *Pz_cp,real *Pz_cp_n,real *Pz_cp_n_1,real *Pz_d,real *Pz_d_n,real *Pz_d_n_1,real *C_1_cp,real *C_2_cp,real *C_3_cp,real *C_4_cp,real *C_5_cp,real *d_1_d, // real *d_2_d,real *d_3_d,real *d_4_d,real *d_5_d,real *d_NL,real C_E,real z0,int N_CP_poles,int N_drude_poles,real *ce_y_N,real *ce_y_F,real *be_y_N,real *be_y_F,real *ce_x_N,real *ce_x_F,real *be_x_N,real *be_x_F, // real dx,real dy,real dz,real dt,int NCELLX,int NCELLY,int NCELLZ,int Hydrodynamics,int cpml_x_lim,int cpml_y_lim,int cpml_z_lim,int cpml_N_X,int cpml_F_X,int cpml_N_Y,int cpml_F_Y,int NcpmlX,int NcpmlY,real C_E_1,real C_E_2,int Periodic_XY){ // // int i,j,k,i2,j2,n; // comp Curl_H,Div_Grad=0.0,dummy_var,J_T; // comp C_P_1,C_P_2,C_P_3,C_P_4,C_P_NL; // comp Vx1,Vx2,Vy1,Vy2,Vz1,Vz2,Nx1,Nx2,Ny1,Ny2,Nz1,Nz2; // // double INV_DX = 1.0/dx; // double INV_DY = 1.0/dy; // double INV_DZ = 1.0/dz; // int idx = blockDim.x * blockIdx.x + threadIdx.x; // // i = idx / (NCELLZ*NCELLY); // j = (idx - i*NCELLZ*NCELLY) / NCELLZ; // k = idx - i*NCELLZ*NCELLY - j*NCELLZ; // // if(Periodic_XY){ // // //#pragma omp parallel for collapse(3) private(Curl_H,i,j,k,i2,j2,n,dummy_var,J_T,Div_Grad) // schedule(static) // // for(k=0;k<NCELLZ-1;k++){ // // for(i=0;i<NCELLX;i++){ // // for(j=0;j<NCELLY;j++){ // if(i<NCELLX,j<NCELLY,k<NCELLZ){ // // for(k=0;k<NCELLZ-1;k++){ // //printf("Thread %d, ready to work\n",omp_get_thread_num()); // // if(i==0 || j==0){ // if(i==0 && j==0){ // #ifdef DOUBLECOMPLEX // Curl_H=(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(NCELLX-1,j,k,NCELLZ,NCELLY)]*cexp(I*k_x*period_x))/kedx[i]-(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,NCELLY-1,k,NCELLZ,NCELLY)]*cexp(I*k_y*period_y))/kedy[j]; // #endif // #ifdef DOUBLEPRECISION // Curl_H=(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(NCELLX-1,j,k,NCELLZ,NCELLY)])/kedx[i]-(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,NCELLY-1,k,NCELLZ,NCELLY)])/kedy[j]; // #endif // // // } // else if(i==0){ // #ifdef DOUBLECOMPLEX // Curl_H=(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(NCELLX-1,j,k,NCELLZ,NCELLY)]*cexp(I*k_x*period_x))/kedx[i]-(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/kedy[j]; // #endif // #ifdef DOUBLEPRECISION // Curl_H=(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(NCELLX-1,j,k,NCELLZ,NCELLY)])/kedx[i]-(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/kedy[j]; // #endif // // // } // else{ // #ifdef 
DOUBLECOMPLEX // Curl_H=(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/kedx[i]-(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,NCELLY-1,k,NCELLZ,NCELLY)]*cexp(I*k_y*period_y))/kedy[j]; // #endif // #ifdef DOUBLEPRECISION // Curl_H=(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/kedx[i]-(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,NCELLY-1,k,NCELLZ,NCELLY)])/kedy[j]; // #endif // // // } // } // // else{ // Curl_H=(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/kedx[i]-(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/kedy[j]; // // } // if(mat_matrix[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max && mat_matrix[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]<6){ // // // Div_Grad = Calc_DIV_GRADz(i,j,k); // Div_Grad = 0.0; // // // CP_D_ez(i,j,k,Curl_H,Div_Grad); // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // // for(n=0;n<N_drude_poles;n++){ // C_P_1+=(d_1_d[n]-1)*Pz_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_3+=(d_2_d[n])*Pz_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_NL += d_NL[n]*Div_Grad; // } // for(n=0;n<N_CP_poles;n++){ // C_P_2+=(C_1_cp[n]-1)*Pz_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // C_P_4+=(C_2_cp[n])*Pz_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // } // ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_E_2*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4-C_P_NL); // // // for(n=0;n<N_CP_poles;n++){ // Pz_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Pz_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Pz_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // // for(n=0;n<N_drude_poles;n++){ // Pz_d[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]=d_1_d[n]*Pz_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_2_d[n]*Pz_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_3_d[n]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_4_d[n]*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_5_d[n]*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] + d_NL[n]*Div_Grad; // } // } // // else{ // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Ceze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Cezh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_H; // } // // } // // } // // } // // } // } // // else{ // // //#pragma omp parallel for collapse(3) private(Curl_H,i,j,k,i2,j2,n,dummy_var,J_T,Div_Grad) // schedule(static) // // for(i=1;i<NCELLX-1;i++){ // // for(j=1;j<NCELLY-1;j++){ // // for(k=0;k<NCELLZ-1;k++){ // if(i>0 && i<(NCELLX-1) && j>0 && j<(NCELLY-1) && k<(NCELLZ-1)){ // Curl_H=(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/kedx[i]-(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/kedy[j]; // if(mat_matrixZ[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max && mat_matrixZ[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]<6){ // // if(Hydrodynamics == 0){ // //Div_Grad = Calc_DIV_GRADz(i,j,k); // Div_Grad = 0.0; // // CP_D_ez(i,j,k,Curl_H,Div_Grad); // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // // for(n=0;n<N_drude_poles;n++){ // C_P_1+=(d_1_d[n]-1)*Pz_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // 
C_P_3+=(d_2_d[n])*Pz_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_NL += d_NL[n]*Div_Grad; // } // for(n=0;n<N_CP_poles;n++){ // C_P_2+=(C_1_cp[n]-1)*Pz_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // C_P_4+=(C_2_cp[n])*Pz_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // } // ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_E_2*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4-C_P_NL); // // // for(n=0;n<N_CP_poles;n++){ // Pz_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Pz_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Pz_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // for(n=0;n<N_drude_poles;n++){ // Pz_d[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]=d_1_d[n]*Pz_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_2_d[n]*Pz_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_3_d[n]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_4_d[n]*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_5_d[n]*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] + d_NL[n]*Div_Grad; // } // // } // // else{ // // // // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // // // // Vz1 = Pz_d_n[FourDMapD(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]; // // Vz2 = Pz_d_n[FourDMapD(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]; // // Nz1 = NDz_prev[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]; // // Nz2 = NDz_prev[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)]; // // // // Vx1 = 0.5*(Px_d_n[FourDMapD(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vx2 = 0.5*(Px_d_n[FourDMapD(i-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMapD(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Nx1 = 0.5*(NDx_prev[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)] + NDx_prev[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]); // // Nx2 = 0.5*(NDx_prev[ThreeDMapD(i-1,j,k+1,NCELLZ,NCELLY)] + NDx_prev[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)]); // // // // Vy1 = 0.5*(Py_d_n[FourDMapD(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vy2 = 0.5*(Py_d_n[FourDMapD(i,j-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMapD(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Ny1 = 0.5*(NDy_prev[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)] + NDy_prev[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]); // // Ny2 = 0.5*(NDy_prev[ThreeDMapD(i,j-1,k+1,NCELLZ,NCELLY)] + NDy_prev[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)]); // // // // NDz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] = NDz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] - 2.0*dt*INV_DX*((Nx1*Vx1-Nx2*Vx2) + (Ny1*Vy1-Ny2*Vy2) + 0.5*(Vz1-Vz2) + ((Vx1-Vx2) + (Vy1-Vy2) + 0.5*(Vz1-Vz2))*N_EQ); // // // // // // for(n=0;n<N_CP_poles;n++){ // // C_P_2+=(C_1_cp[n]-1)*Pz_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // // C_P_4+=(C_2_cp[n])*Pz_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // // } // // ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0 + dt*Pz_d[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*e0*(NDz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] + 
N_EQ)+C_E_1*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_E_2*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_2-C_P_4); // // // // for(n=0;n<N_CP_poles;n++){ // // Pz_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Pz_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Pz_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // // } // } // // // else{ // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Ceze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Cezh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_H; // } // //Y CPML // if(j<cpml_N_Y && i<cpml_x_lim && k<cpml_z_lim){ //Near Y PML // psi_Ez_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY+1)]=be_y_N[j]*psi_Ez_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY+1)]+ce_y_N[j]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/dy; // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Cezh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ez_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY+1)]; // } // if(j>=cpml_F_Y && i<cpml_x_lim && k<cpml_z_lim){ // j2 = j - cpml_F_Y; // psi_Ez_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY+1)]=be_y_F[j2]*psi_Ez_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY+1)]+ce_y_F[j2]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/dy; // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Cezh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ez_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY+1)]; // } // //X PML // if(i<cpml_N_X+1 && j<cpml_y_lim && k<cpml_z_lim){//Near X-PML // psi_Ez_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=be_x_N[i]*psi_Ez_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+ce_x_N[i]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/dx; // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Cezh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ez_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // if(i>=cpml_F_X && j<cpml_y_lim && k<cpml_z_lim){//far X-PML // i2 = i - cpml_F_X; // psi_Ez_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]=be_x_F[i2]*psi_Ez_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]+ce_x_F[i2]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/dx; // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Cezh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ez_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]; // } // // if(mat_matrix[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max){ // // for(n=0;n<N_CP_poles;n++){ // // Pz_cp[ThreeDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Pz_cp_n[ThreeDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Pz_cp_n_1[ThreeDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // // for(n=0;n<N_drude_poles;n++){ // // Pz_d[ThreeDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]=d_1_d[n]*Pz_d_n[ThreeDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_2_d[n]*Pz_d_n_1[ThreeDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_3_d[n]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_4_d[n]*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_5_d[n]*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // // } // // } // // } // // } // // // // } // } // // return; // } // void CP_D_ex(int i,int j, int k, comp Curl_H,comp Div_Grad){ int n; comp C_P_1,C_P_2,C_P_3,C_P_4,C_P_NL; C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; for(n=0;n<N_drude_poles;n++){ C_P_1+=(d_1_d[n]-1)*Px_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; C_P_3+=(d_2_d[n])*Px_d_n_1[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; C_P_NL += d_NL[n]*Div_Grad; } 
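    /* Accumulate the critical-point (CP) pole contributions of the recursive
       convolution in the same way as the Drude poles above: C_1_cp and C_2_cp
       weight the polarization values from the previous two time steps. */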
for(n=0;n<N_CP_poles;n++){ C_P_2+=(C_1_cp[n]-1)*Px_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; C_P_4+=(C_2_cp[n])*Px_cp_n_1[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; } ex_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=ex_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; ex_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=ex[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; ex[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ex_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-C_E_2*ex_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4 - C_P_NL); } void CP_D_ey(int i,int j, int k, comp Curl_H,comp Div_Grad ){ int n; comp C_P_1,C_P_2,C_P_3,C_P_4,C_P_NL; C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; //printf("here"); for(n=0;n<N_drude_poles;n++){ C_P_1+=(d_1_d[n]-1.0)*Py_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; C_P_3+=(d_2_d[n])*Py_d_n_1[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; C_P_NL += d_NL[n]*Div_Grad; } for(n=0;n<N_CP_poles;n++){ C_P_2+=(C_1_cp[n]-1.0)*Py_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; C_P_4+=(C_2_cp[n])*Py_cp_n_1[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; } ey_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=ey_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; ey_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=ey[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; ey[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ey_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-C_E_2*ey_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4-C_P_NL); } void CP_D_ez(int i,int j, int k, comp Curl_H, comp Div_Grad){ int n; comp C_P_1,C_P_2,C_P_3,C_P_4,C_P_NL; C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; for(n=0;n<N_drude_poles;n++){ C_P_1+=(d_1_d[n]-1)*Pz_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; C_P_3+=(d_2_d[n])*Pz_d_n_1[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; C_P_NL += d_NL[n]*Div_Grad; } for(n=0;n<N_CP_poles;n++){ C_P_2+=(C_1_cp[n]-1)*Pz_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; C_P_4+=(C_2_cp[n])*Pz_cp_n_1[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; } ez_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=ez_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; ez_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=ez[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; ez[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ez_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-C_E_2*ez_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4-C_P_NL); } // // // __global__ void UPDATE_hx(real *hx,real *ez,real *ey,real *Chxh,real *Chxe,real *psi_Hx_z_N,real *psi_Hx_z_F,real *psi_Hx_y_N,real *psi_Hx_y_F,real *khdy,real // *khdz,real *bh_z_N,real *bh_z_F,real *ch_z_N,real *ch_z_F,real *bh_y_N,real *bh_y_F,real *ch_y_N,real *ch_y_F,int NCELLX,int NCELLY,int NCELLZ,int Periodic_XY,real dx,real dy,real dz,real dt,int cpml_N_Z,int cpml_F_Z,int cpml_N_Y,int cpml_F_Y,int cpml_z_lim,int cpml_y_lim,int cpml_x_lim,int NcpmlZ,int NcpmlY){ // //void UPDATE_hx(void){ // // hipProfilerStart(); // // int i,j,k,j2,k2; // comp Curl_E; // int idx = blockDim.x * blockIdx.x + threadIdx.x; // // i = idx / (NCELLZ*NCELLY); // j = (idx - i*NCELLZ*NCELLY) / NCELLZ; // k = idx - i*NCELLZ*NCELLY - j*NCELLZ; // // if(Periodic_XY){ // ////#pragma omp parallel for collapse(3) private(i,j,k,Curl_E,j2,k2) // schedule(static) // // for(k=0;k<NCELLZ;k++){ // // for(i=0;i<NCELLX;i++){ // // for(j=0;j<NCELLY;j++){ // // if(i<NCELLX && j<NCELLY && k<NCELLZ){ // // for(k=0;k<NCELLZ-1;k++){ // //if(i==1) printf("%d %d %d\n",i,j,k); // if(j==NCELLY-1){ // #ifdef DOUBLECOMPLEX // Curl_E=(ey[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdz[k]-(ez[ThreeDMapD(i,0,k,NCELLZ,NCELLY)]*cexp(-I*k_y*period_y)-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]; 
// #endif // #ifdef DOUBLEPRECISION // Curl_E=(ey[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdz[k]-(ez[ThreeDMapD(i,0,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]; // #endif // hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chxh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // // } // else{ // Curl_E=(ey[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdz[k]-(ez[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]; // hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chxh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // } // //Z-CPML // if(k<cpml_N_Z && i<cpml_x_lim && j<cpml_y_lim){ // //Near Z-PML // psi_Hx_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]=bh_z_N[k]*psi_Hx_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]+ch_z_N[k]*(ey[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dz; // hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hx_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]; // } // if(k>=cpml_F_Z && j<cpml_y_lim && i<cpml_x_lim){ // //Far Z-PML // k2 = k - cpml_F_Z; // psi_Hx_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]=bh_z_F[k2]*psi_Hx_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]+ch_z_F[k2]*(ey[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dz; // hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hx_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]; // } // } // // } // // } // // } // } // // else{ // // //#pragma omp target device(0) MapD(Chxe[:NCELLX-1][:NCELLY-1][:NCELLZ-1],Chxh[:NCELLX-1][:NCELLY-1][:NCELLZ-1],ez[:NCELLX-1][:NCELLY-1][:NCELLZ-1],ey[:NCELLX-1][:NCELLY-1][:NCELLZ-1],khdy[:NCELLY-1],khdz[:NCELLZ-1],bh_z_N[:NcpmlZ-1],bh_z_F[:NcpmlZ-1],ch_z_N[:NcpmlZ-1],ch_z_F[:NcpmlZ-1],bh_y_N[:NcpmlY-1],bh_y_F[:NcpmlY-1],ch_y_N[:NcpmlY-1],ch_y_F[:NcpmlY-1]) MapD(tofrom:hx[:NCELLX-1][:NCELLY-1][:NCELLZ-1],psi_Hx_z_N[:NCELLX-1][:NCELLY-1][:cpml_N_Z-1],psi_Hx_z_F[:NCELLX-1][:NCELLY-1][:cpml_N_Z-1],psi_Hx_y_N[:NCELLX-1][:cpml_N_Y-1][:NCELLZ-1],psi_Hx_y_F[:NCELLX-1][:cpml_N_Y-1][:NCELLZ-1]) // // { // // //#pragma omp parallel for collapse(3) private(i,j,k,Curl_E,j2,k2) // schedule(static) // // for(i=0;i<NCELLX;i++){ // // for(j=0;j<NCELLY-1;j++){ // // for(k=0;k<NCELLZ-1;k++){ // if(i<NCELLX && j<(NCELLY-1) && k<(NCELLZ-1)){ // // Curl_E=(ey[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdz[k]-(ez[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]; // hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chxh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // //Z-CPML // if(k<cpml_N_Z && i<cpml_x_lim && j<cpml_y_lim){ // //Near Z-PML // psi_Hx_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]=bh_z_N[k]*psi_Hx_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]+ch_z_N[k]*(ey[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dz; // hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hx_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]; // } // if(k>=cpml_F_Z && j<cpml_y_lim && i<cpml_x_lim){ // //Far Z-PML // k2 = k - cpml_F_Z; // psi_Hx_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]=bh_z_F[k2]*psi_Hx_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]+ch_z_F[k2]*(ey[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dz; // 
hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hx_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]; // } // //Y- PML // if(j<cpml_N_Y && i<cpml_x_lim && j<cpml_y_lim){ // psi_Hx_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY)]=bh_y_N[j]*psi_Hx_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY)]+ch_y_N[j]*(ez[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dy; // hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hx_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY)]; // } // if(j>=cpml_F_Y && i<cpml_x_lim && k<cpml_z_lim){ // j2 = j - cpml_F_Y; // psi_Hx_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY)]=bh_y_F[j2]*psi_Hx_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY)]+ch_y_F[j2]*(ez[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dy; // hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hx_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY)]; // } // } // // } // // } // // // } // // } // } // // hipProfilerStop(); // // return; // } // // __global__ void UPDATE_hy(real *hy,real *ez,real *ex,real *Chyh,real *Chye,real *psi_Hy_z_N,real *psi_Hy_z_F,real *psi_Hy_x_N,real *psi_Hy_x_F,real *khdx,real // *khdz,real *bh_z_N,real *bh_z_F,real *ch_z_N,real *ch_z_F,real *bh_x_N,real *bh_x_F,real *ch_x_N,real *ch_x_F,int NCELLX,int NCELLY,int NCELLZ,int Periodic_XY,real dx,real dy,real dz,real dt,int cpml_N_Z,int cpml_F_Z,int cpml_N_X,int cpml_F_X,int cpml_z_lim,int cpml_y_lim,int cpml_x_lim,int NcpmlZ,int NcpmlX){ // // int i,j,k,n,i2,k2; // comp Curl_E; // int idx = blockDim.x * blockIdx.x + threadIdx.x; // // i = idx / (NCELLZ*NCELLY); // j = (idx - i*NCELLZ*NCELLY) / NCELLZ; // k = idx - i*NCELLZ*NCELLY - j*NCELLZ; // if(Periodic_XY){ // ////#pragma omp parallel for collapse(3) private(Curl_E,i,i2,j,k,k2) // schedule(static) // // for(k=0;k<NCELLZ;k++){ // // for(i=0;i<NCELLX;i++){ // // for(j=0;j<NCELLY;j++){ // if(i<NCELLX && j<NCELLY && k<NCELLZ){ // // for(k=0;k<NCELLZ-1;k++){ // if(i==NCELLX-1){ // #ifdef DOUBLECOMPLEX // Curl_E=(ez[ThreeDMapD(0,j,k,NCELLZ,NCELLY)]*cexp(-I*k_x*period_x)-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]-(ex[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdz[k]; // #endif // #ifdef DOUBLEPRECISION // Curl_E=(ez[ThreeDMapD(0,j,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]-(ex[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdz[k]; // #endif // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // } // else{ // Curl_E=(ez[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]-(ex[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdz[k]; // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // } // //Z-PML // if(k<cpml_N_Z && j<cpml_y_lim && k<cpml_z_lim){ // psi_Hy_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]=bh_z_N[k]*psi_Hy_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]+ch_z_N[k]*(ex[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dz; // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hy_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]; // } // if(k>=cpml_F_Z && j<cpml_y_lim && k<cpml_z_lim){ // k2 = k - cpml_F_Z; // 
psi_Hy_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]=bh_z_F[k2]*psi_Hy_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]+ch_z_F[k2]*(ex[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dz; // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hy_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]; // } // } // // } // // } // // } // } // // else{ // ////#pragma omp parallel for collapse(3) private(Curl_E,i,i2,j,k,k2) // schedule(static) // // for(i=0;i<NCELLX-1;i++){ // // for(j=0;j<NCELLY;j++){ // // for(k=0;k<NCELLZ-1;k++){ // if(i<(NCELLX-1) && j<NCELLY && k<(NCELLZ-1)){ // // Curl_E=(ez[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]-(ex[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdz[k]; // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // // //Z-PML // if(k<cpml_N_Z && j<cpml_y_lim && k<cpml_z_lim){ // psi_Hy_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]=bh_z_N[k]*psi_Hy_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]+ch_z_N[k]*(ex[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dz; // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hy_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]; // } // if(k>=cpml_F_Z && j<cpml_y_lim && k<cpml_z_lim){ // k2 = k - cpml_F_Z; // psi_Hy_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]=bh_z_F[k2]*psi_Hy_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]+ch_z_F[k2]*(ex[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dz; // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hy_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]; // } // //X-PML // if(i<cpml_N_X && j<cpml_y_lim && k<cpml_z_lim){ // psi_Hy_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=bh_x_N[i]*psi_Hy_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+ch_x_N[i]*(ez[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dx; // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hy_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // if(i>=cpml_F_X && j<cpml_y_lim && k<cpml_z_lim){ // i2 = i - cpml_F_X; // psi_Hy_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]=bh_x_F[i2]*psi_Hy_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]+ch_x_F[i2]*(ez[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dx; // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hy_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]; // } // } // // } // // } // // } // } // return; // } // // //void UPDATE_hz(void){ // __global__ void UPDATE_hz(real *hz,real *ey,real *ex,real *Chzh,real *Chze,real *psi_Hz_x_N,real *psi_Hz_x_F,real *psi_Hz_y_N,real *psi_Hz_y_F,real *khdx,real // *khdy,real *bh_x_N,real *bh_x_F,real *ch_x_N,real *ch_x_F,real *bh_y_N,real *bh_y_F,real *ch_y_N,real *ch_y_F,int NCELLX,int NCELLY,int NCELLZ,int Periodic_XY,real dx,real dy,real dz,real dt,int cpml_N_X,int cpml_F_X,int cpml_N_Y,int cpml_F_Y,int cpml_z_lim,int cpml_y_lim,int cpml_x_lim,int NcpmlY,int NcpmlX){ // // int i,j,k,j2,i2; // comp Curl_E; // int idx = blockDim.x * blockIdx.x + threadIdx.x; // // i = idx / (NCELLZ*NCELLY); // j = (idx - i*NCELLZ*NCELLY) / NCELLZ; // k = idx - i*NCELLZ*NCELLY - j*NCELLZ; // if(Periodic_XY){ // // //#pragma omp parallel for collapse(3) private(Curl_E,i,j,k,j2,i2) // schedule(static) // // for(k=0;k<NCELLZ;k++){ // // for(i=0;i<NCELLX;i++){ // // for(j=0;j<NCELLY;j++){ // if(i<NCELLX && j<NCELLY && k<NCELLZ){ // // for(k=0;k<NCELLZ;k++){ // if(i==NCELLX-1 || 
j== NCELLY-1){ // if(i==NCELLX-1 && j==NCELLY-1){ // //printf("%d,%d,%d\n",i,j,k); // #ifdef DOUBLECOMPLEX // Curl_E=(ex[ThreeDMapD(i,0,k,NCELLZ,NCELLY)]*cexp(-I*k_y*period_y)-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]-(ey[ThreeDMapD(0,j,k,NCELLZ,NCELLY)]*cexp(-I*k_x*period_x)-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]; // #endif // #ifdef DOUBLEPRECISION // Curl_E=(ex[ThreeDMapD(i,0,k,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]-(ey[ThreeDMapD(0,j,k,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]; // #endif // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chzh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // } // else if(i==NCELLX-1){ // #ifdef DOUBLECOMPLEX // Curl_E=(ex[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]-(ey[ThreeDMapD(0,j,k,NCELLZ,NCELLY)]*cexp(-I*k_x*period_x)-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]; // #endif // #ifdef DOUBLEPRECISION // Curl_E=(ex[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]-(ey[ThreeDMapD(0,j,k,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]; // #endif // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chzh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // } // else{ // #ifdef DOUBLECOMPLEX // Curl_E=(ex[ThreeDMapD(i,0,k,NCELLZ,NCELLY)]*cexp(-I*k_y*period_y)-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]-(ey[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]; // #endif // #ifdef DOUBLEPRECISION // Curl_E=(ex[ThreeDMapD(i,0,k,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]-(ey[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]; // #endif // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chzh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // } // } // else{ // Curl_E=(ex[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]-(ey[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]; // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chzh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // } // } // // } // // } // // } // } // // else{ // // //#pragma omp parallel for collapse(3) private(Curl_E,i,j,k,j2,i2) // schedule(static) // // for(i=0;i<NCELLX-1;i++){ // // for(j=0;j<NCELLY-1;j++){ // // for(k=0;k<NCELLZ;k++){ // if(i<(NCELLX-1) && j<(NCELLY-1) && k<NCELLZ){ // // Curl_E=(ex[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]-(ey[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]; // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chzh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // //X-PML // if(i<cpml_N_X && j<cpml_y_lim && k<cpml_z_lim){ // psi_Hz_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=bh_x_N[i]*psi_Hz_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+ch_x_N[i]*(ey[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dx; // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hz_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // if(i>=cpml_F_X && j<cpml_y_lim && k<cpml_z_lim){ // i2 = i - cpml_F_X; // 
psi_Hz_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]=bh_x_F[i2]*psi_Hz_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]+ch_x_F[i2]*(ey[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dx; // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hz_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]; // } // //Y-PML // if(j<cpml_N_Y && i<cpml_x_lim && k<cpml_z_lim){ // psi_Hz_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY)]=bh_y_N[j]*psi_Hz_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY)]+ch_y_N[j]*(ex[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dy; // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hz_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY)]; // } // if(j>=cpml_F_Y && i<cpml_x_lim && k<cpml_z_lim){ // j2 = j - cpml_F_Y; // psi_Hz_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY)]=bh_y_F[j2]*psi_Hz_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY)]+ch_y_F[j2]*(ex[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dy; // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hz_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY)]; // } // } // // } // // } // // } // } // return; // } // void UpdateHydroPx(void){ // int i,j,k; // real Vx1,Vx2,Vx3,Vy1,Vy2,Vy3,Vz1,Vz2,Vz3,Hx1,Hz1,Hy1,Hx2,Hz2,Hy2,Ex1,Ey1,Ez1,VdotGrad,VdotGrad2,DivV,VcrossH,VcrossH2,Pressure,ND1,ND2,ND3,Grad_Div,Grad_Div2; // real INV_DX,INV_DY,INV_DZ; // INV_DX = 1.0/dx; // INV_DY = 1.0/dy; // INV_DZ = 1.0/dz; // Grad_Div = 0.0; // Grad_Div2 =0.0; // ////#pragma omp parallel for collapse(3) // schedule(static) // for(i=0;i<NCELLX-1;i++){ // for(j=1;j<NCELLY-1;j++){ // for(k=1;k<NCELLZ-1;k++){ // if(mat_matrixX[ThreeDMap(i,j,k,NCELLZ,NCELLY)]> first_medium && mat_matrixX[ThreeDMap(i,j,k,NCELLZ,NCELLY)] < 6){ // ND1 = N_EQ + NDx_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; // // Grad_Div = Calc_DIV_GRADx(i,j,k); // Grad_Div2 = Calc_DIV_GRADx2(i,j,k); // // // Vx1 = 0.5 * (Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // Vx2 = 0.5 * (Px_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vy1 = 0.25 * (Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i+1,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]); // Vy2 = 0.25 * (Py_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n_1[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n_1[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n_1[FourDMap(i+1,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]); // //Vy1 = 0.5 * (Vy1 + Vy2); // Vz1 = 0.25 * (Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); // Vz2 = 0.25 * (Pz_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n_1[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n_1[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n_1[FourDMap(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); // //Vz1 = 0.5 * (Vz1 + Vz2); // Hy2 = 0.5 * (hyPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hyPrev[ThreeDMap(i,j,k-1,NCELLZ,NCELLY)]); // Hz2 = 0.5 * (hzPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hzPrev[ThreeDMap(i,j-1,k,NCELLZ,NCELLY)]); // Hy1 = 0.5 * (hy[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hy[ThreeDMap(i,j,k-1,NCELLZ,NCELLY)]); // Hz1 = 0.5 * 
(hz[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hz[ThreeDMap(i,j-1,k,NCELLZ,NCELLY)]); // // if(WithConvection) { // VdotGrad = 0.5*(Vx1*(Px_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DX + Vy1*(Px_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - Px_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DY + Vz1*(Px_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - Px_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DZ); // VdotGrad2 = 0.5*(Vx2*(Px_d_n_1[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - Px_d_n_1[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DX + Vy2*(Px_d_n_1[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - Px_d_n_1[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DY + Vz2*(Px_d_n_1[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - Px_d_n_1[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DZ); // VdotGrad = (VdotGrad - VdotGrad2)/dt; // } // else VdotGrad = 0.0; // // if(WithMagField){ // VcrossH = Vy1*Hz1 - Vz1*Hy1; // VcrossH2 = Vy2*Hz2 - Vz2*Hy2; // VcrossH = (VcrossH - VcrossH2)/dt; // } // else VcrossH = 0.0; // // Px_d[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] = d_1_d[0]*Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + d_2_d[0]*Px_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] +d_3_d[0]*ex[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + d_4_d[0]*ex_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + d_NL[0]*(Grad_Div + Grad_Div2/N_EQ)/pow(ND1,1.0/3.0) + d_5_d[0]*(VdotGrad + VcrossH); // // } // // } // } // } // } // // void UpdateHydroPy(void){ // int i,j,k; // real Vx1,Vx2,Vx3,Vy1,Vy2,Vy3,Vz1,Vz2,Vz3,Hx1,Hz1,Hy1,Hx2,Hz2,Hy2,Ex1,Ey1,Ez1,VdotGrad,VdotGrad2,DivV,VcrossH,VcrossH2,Pressure,ND1,ND2,ND3,Grad_Div,Grad_Div2; // real INV_DX,INV_DY,INV_DZ; // INV_DX = 1.0/dx; // INV_DY = 1.0/dy; // INV_DZ = 1.0/dz; // Grad_Div = 0.0; // Grad_Div2 =0.0; // ////#pragma omp parallel for collapse(3) // schedule(static) // for(i=1;i<NCELLX-1;i++){ // for(j=0;j<NCELLY-1;j++){ // for(k=1;k<NCELLZ-1;k++){ // if(mat_matrixY[ThreeDMap(i,j,k,NCELLZ,NCELLY)]> first_medium && mat_matrixY[ThreeDMap(i,j,k,NCELLZ,NCELLY)] < 6){ // // Grad_Div = Calc_DIV_GRADy(i,j,k); // Grad_Div2 = Calc_DIV_GRADy2(i,j,k); // // ND1 = N_EQ + NDy_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; // // Vy1 = 0.5 * (Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // Vy2 = 0.5 * (Py_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vx1 = 0.25 * (Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]); // Vx2 = 0.25 * (Px_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n_1[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n_1[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n_1[FourDMap(i-1,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vx1 = 0.5 * (Vx1 + Vx2); // Vz1 = 0.25 * (Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); // Vz2 = 0.25 * (Pz_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n_1[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n_1[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)] + 
Pz_d_n_1[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); // //Vz1 = 0.5 * (Vz1 + Vz2); // Hx1 = 0.5 * (hx[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hx[ThreeDMap(i,j,k-1,NCELLZ,NCELLY)]); // Hz1 = 0.5 * (hz[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hz[ThreeDMap(i-1,j,k,NCELLZ,NCELLY)]); // Hx2 = 0.5 * (hxPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hxPrev[ThreeDMap(i,j,k-1,NCELLZ,NCELLY)]); // Hz2 = 0.5 * (hzPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hzPrev[ThreeDMap(i-1,j,k,NCELLZ,NCELLY)]); // // // if(WithMagField){ // VcrossH = Vz1*Hx1 - Vx1*Hz1; // VcrossH2 = Vz2*Hx2 - Vx2*Hz2; // VcrossH = (VcrossH - VcrossH2)/dt; // } // else VcrossH = 0.0; // if(WithConvection) { // VdotGrad = 0.5*(Vx1*(Py_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - Py_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DX + Vy1*(Py_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DY + Vz1*(Py_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - Py_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DZ); // VdotGrad2 = 0.5*(Vx2*(Py_d_n_1[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - Py_d_n_1[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DX + Vy2*(Py_d_n_1[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - Py_d_n_1[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DY + Vz2*(Py_d_n_1[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - Py_d_n_1[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DZ); // VdotGrad = (VdotGrad - VdotGrad2)/dt; // } // else VdotGrad = 0.0; // // Py_d[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] = d_1_d[0]*Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + d_2_d[0]*Py_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + d_3_d[0]*ey[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + d_4_d[0]*ey_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + d_NL[0]*(Grad_Div + Grad_Div2/N_EQ)/pow(ND1,1.0/3.0) + d_5_d[0]*(VdotGrad + VcrossH); // // } // // // } // } // } // } // // void UpdateHydroPz(void){ // int i,j,k; // real Vx1,Vx2,Vx3,Vy1,Vy2,Vy3,Vz1,Vz2,Vz3,Hx1,Hz1,Hy1,Hx2,Hz2,Hy2,Ex1,Ey1,Ez1,VdotGrad,VdotGrad2,DivV,VcrossH,VcrossH2,Pressure,ND1,ND2,ND3,Grad_Div,Grad_Div2; // real INV_DX,INV_DY,INV_DZ; // INV_DX = 1.0/dx; // INV_DY = 1.0/dy; // INV_DZ = 1.0/dz; // Grad_Div = 0.0; // Grad_Div2 =0.0; // ////#pragma omp parallel for collapse(3) // schedule(static) // for(i=1;i<NCELLX-1;i++){ // for(j=1;j<NCELLY-1;j++){ // for(k=0;k<NCELLZ-1;k++){ // if(mat_matrixZ[ThreeDMap(i,j,k,NCELLZ,NCELLY)]> first_medium && mat_matrixZ[ThreeDMap(i,j,k,NCELLZ,NCELLY)] < 6){ // // Grad_Div = Calc_DIV_GRADz(i,j,k); // Grad_Div2 = Calc_DIV_GRADz2(i,j,k); // // ND1 = N_EQ + NDz_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; // // Vz1 = 0.5 * (Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // Vz2 = 0.5 * (Pz_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vx1 = 0.25 * (Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // Vx2 = 0.25 * (Px_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n_1[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n_1[FourDMap(i-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n_1[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // //Vx1 = 0.5 * (Vx1 + Vx2); // Vy1 = 0.25 * 
(Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]); // Vy2 = 0.25 * (Py_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n_1[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n_1[FourDMap(i,j-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n_1[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vy1 = 0.5 * (Vy1 + Vy2); // Hy1 = 0.5 * (hy[ThreeDMap(i-1,j,k,NCELLZ,NCELLY)] + hy[ThreeDMap(i,j,k,NCELLZ,NCELLY)]); // Hx1 = 0.5 * (hx[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hx[ThreeDMap(i,j-1,k,NCELLZ,NCELLY)]); // Hy2 = 0.5 * (hyPrev[ThreeDMap(i-1,j,k,NCELLZ,NCELLY)] + hyPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]); // Hx2 = 0.5 * (hxPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hxPrev[ThreeDMap(i,j-1,k,NCELLZ,NCELLY)]); // // if(WithMagField){ // VcrossH = Vx1*Hy1 - Vy1*Hx1; // VcrossH2 = Vx2*Hy2 - Vy2*Hx2; // VcrossH = (VcrossH - VcrossH2)/dt; // } // else VcrossH = 0.0; // // if(WithConvection){ // VdotGrad = 0.5*(Vx1*(Pz_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - Pz_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DX + Vy1*(Pz_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - Pz_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DY + Vz1*(Pz_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DZ); // VdotGrad2 = 0.5*(Vx2*(Pz_d_n_1[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - Pz_d_n_1[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DX + Vy2*(Pz_d_n_1[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - Pz_d_n_1[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DY + Vz2*(Pz_d_n_1[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - Pz_d_n_1[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DZ); // VdotGrad = (VdotGrad - VdotGrad2)/dt; // } // else VdotGrad = 0.0; // // Pz_d[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] = d_1_d[0]*Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + d_2_d[0]*Pz_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + d_3_d[0]*ez[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + d_4_d[0]*ez_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + d_NL[0]*(Grad_Div + Grad_Div2/N_EQ)/pow(ND1,1.0/3.0) + d_5_d[0]*(VdotGrad + VcrossH); // // } // // // } // } // } // } // // // comp Calc_DIV_GRADx2(int i,int j, int k){ comp Div_Grad; real INV_DX = 1.0/dx; real INV_DY = 1.0/dy; real INV_DZ = 1.0/dz; int n; Div_Grad = INV_DX*INV_DX*(Px_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i+1,j,k,NCELLZ,NCELLY)] - 2.0*Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i-1,j,k,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Py_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i+1,j,k,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i+1,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i+1,j-1,k,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j-1,k,NCELLZ,NCELLY)]) + 
INV_DX*INV_DZ*(Pz_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i+1,j,k,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i+1,j,k-1,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j,k-1,NCELLZ,NCELLY)]);
    return Div_Grad;
}

/* Mixed second differences (div-grad) of the Drude polarization with periodic
   wrap-around at the x/y edges; used for the d_NL nonlinear term when
   Diverge_Gradient is enabled. */
comp Calc_DIV_GRADx(int i,int j,int k){
    comp Div_Grad;
    real INV_DX = 1.0/dx;
    real INV_DY = 1.0/dy;
    real INV_DZ = 1.0/dz;
    int n;
    for(n=0;n<N_drude_poles;n++){
        if(Diverge_Gradient){
            if(i==0 && j==0){
                Div_Grad = INV_DX*INV_DX*(Px_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(NCELLX-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Py_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i+1,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Pz_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]);
            }
            else if(i==NCELLX-1 && j==0){
                Div_Grad = INV_DX*INV_DX*(Px_d_n[FourDMap(0,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Py_d_n[FourDMap(0,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(0,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Pz_d_n[FourDMap(0,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(0,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]);
            }
            else if(i==NCELLX-1){
                Div_Grad = INV_DX*INV_DX*(Px_d_n[FourDMap(0,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Py_d_n[FourDMap(0,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(0,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Pz_d_n[FourDMap(0,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(0,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]);
            }
            else if(i==0){
                Div_Grad = INV_DX*INV_DX*(Px_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(NCELLX-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Py_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i+1,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Pz_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]);
            }
            else if(j==0){
                Div_Grad = INV_DX*INV_DX*(Px_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] -
2.0*Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Py_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i+1,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Pz_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } else{ Div_Grad = INV_DX*INV_DX*(Px_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Py_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i+1,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Pz_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } } } return Div_Grad; } comp Calc_DIV_GRADy2(int i,int j,int k){ comp Div_Grad; real INV_DX = 1.0/dx; real INV_DY = 1.0/dy; real INV_DZ = 1.0/dz; int n; Div_Grad = INV_DY*INV_DY*(Py_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j+1,k,NCELLZ,NCELLY)] - 2.0*Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j-1,k,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Px_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i,j+1,k,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i-1,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i-1,j+1,k,NCELLZ,NCELLY)]+Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i-1,j,k,NCELLZ,NCELLY)]) + INV_DY*INV_DZ*(Pz_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j+1,k,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j+1,k-1,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j,k-1,NCELLZ,NCELLY)]); return Div_Grad; } comp Calc_DIV_GRADy(int i,int j,int k){ comp Div_Grad; real INV_DX = 1.0/dx; real INV_DY = 1.0/dy; real INV_DZ = 1.0/dz; int n; for(n=0;n<N_drude_poles;n++){ if(Diverge_Gradient){ if(i==0 && j==0){ Div_Grad = INV_DY*INV_DY*(Py_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Px_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(NCELLX-1,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(NCELLX-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DY*INV_DZ*(Pz_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } else if(i==0 && 
j==NCELLY-1){ Div_Grad = INV_DY*INV_DY*(Py_d_n[FourDMap(i,0,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Px_d_n[FourDMap(i,0,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(NCELLX-1,0,k,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(NCELLX-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DY*INV_DZ*(Pz_d_n[FourDMap(i,0,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,0,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } else if(j==NCELLY-1){ Div_Grad = INV_DY*INV_DY*(Py_d_n[FourDMap(i,0,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Px_d_n[FourDMap(i,0,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i-1,0,k,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DY*INV_DZ*(Pz_d_n[FourDMap(i,0,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,0,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } else if(j==0){ Div_Grad = INV_DY*INV_DY*(Py_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Px_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i-1,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DY*INV_DZ*(Pz_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } else if(i==0){ Div_Grad = INV_DY*INV_DY*(Py_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Px_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(NCELLX-1,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(NCELLX-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DY*INV_DZ*(Pz_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } else{ Div_Grad = INV_DY*INV_DY*(Py_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Px_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i-1,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DY*INV_DZ*(Pz_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } } } //printf("%e\n",Div_Grad); return 
Div_Grad; } comp Calc_DIV_GRADz2(int i,int j,int k){ comp Div_Grad; real INV_DX = 1.0/dx; real INV_DY = 1.0/dy; real INV_DZ = 1.0/dz; int n; Div_Grad = INV_DZ*INV_DZ*(Pz_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j,k+1,NCELLZ,NCELLY)] - 2.0*Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j,k-1,NCELLZ,NCELLY)]) + INV_DZ*INV_DY*(Py_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j,k+1,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j-1,k+1,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j-1,k,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Px_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i,j,k+1,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i-1,j,k+1,NCELLZ,NCELLY)]+Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i-1,j,k,NCELLZ,NCELLY)]); return Div_Grad; } comp Calc_DIV_GRADz(int i,int j,int k){ comp Div_Grad; real INV_DX = 1.0/dx; real INV_DY = 1.0/dy; real INV_DZ = 1.0/dz; int n; for(n=0;n<N_drude_poles;n++){ if(Diverge_Gradient){ if(i==0 && j==0){ Div_Grad = INV_DZ*INV_DZ*(Pz_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DZ*INV_DY*(Py_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,NCELLY-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Px_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(NCELLX-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(NCELLX-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); } else if(i==0){ Div_Grad = INV_DZ*INV_DZ*(Pz_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DZ*INV_DY*(Py_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Px_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(NCELLX-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(NCELLX-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); } else if(j==0){ Div_Grad = INV_DZ*INV_DZ*(Pz_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DZ*INV_DY*(Py_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,NCELLY-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + 
INV_DX*INV_DZ*(Px_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); } else{ Div_Grad = INV_DZ*INV_DZ*(Pz_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DZ*INV_DY*(Py_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Px_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // if(t==173 && k==102){ // printf("%e\t%e\t%e\n",Pz_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)],Py_d_n[i][j+1][k+1][0]-Py_d_n[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[i][j-1][k-1][0],Px_d_n[i+1][j][k+1][0]-Px_d_n[FourDMap(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[i-1][j][k-1][0]); // printf("%e\n",Py_d_n[i][j+1][k+1][0]-(Py_d_n[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,j-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)])+Py_d_n[i][j-1][k-1][0]); // // } //1.186946e-66 } // printf("%e\n",Div_Grad); } } return Div_Grad; }
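// Note on the Calc_DIV_GRAD{x,y,z} and Calc_DIV_GRAD{y,z}2 helpers above: each
// returns one Cartesian component of grad(div P) for Drude pole 0 (the non-"2"
// variants loop over N_drude_poles but still read only pole index 0), and the
// result is the Div_Grad quantity that enters the nonlocal C_P_NL term of the
// dispersive E-field update.  The "2" variants additionally weight every P term
// by the previous electron-density arrays ND{x,y,z}_prev.  The i==0 / j==0 (and,
// where present, i==NCELLX-1 / j==NCELLY-1) branches wrap the out-of-range
// neighbour around, i.e. periodic handling in the transverse directions.  In the
// non-"2" variants Div_Grad is only assigned when Diverge_Gradient is nonzero
// and at least one Drude pole exists; otherwise it is returned uninitialized.
// Minimal 1-D sketch of the stencil being applied, for illustration only
// (uniform spacing assumed; not part of the original source):
//   d/dx( dP/dx )|_i  ~=  ( P[i+1] - 2.0*P[i] + P[i-1] ) / (dx*dx)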
4919e56601a9719e354d3917959b3e870e287441.cu
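// CUDA build of the FDTD update step.  UPDATE_B() and UPDATE_E() below are the
// host entry points; the kernel launches (UPDATE_hx/hy/hz, UPDATE_ex/ey/ez) are
// commented out in this revision, so only cudaDeviceSynchronize() in UPDATE_B()
// and the host loop in UPDATE_E() that shifts the P_d / P_cp polarization
// histories and swaps the ND density buffers actually run.  CP_D_e{x,y,z}
// further down give the per-cell critical-point + Drude form of the E update.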
#include <stdio.h> #include <stdlib.h> #include <math.h> #include "extern_var.h" #include "cuda_profiler_api.h" #include <cuda.h> // #include <cuda_runtime.h> // #include <device_launch_parameters.h> //#include<conio.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <cuda_runtime_api.h> #include <cudaProfiler.h> // __device__ int ThreeDMapD(int i,int j,int k,int SizeZ,int SizeY){ // int num = k + SizeZ*j +SizeY*SizeZ*i; // return num; // } // // // __device__ int FourDMapD(int i,int j,int k,int n,int SizeN,int SizeZ,int SizeY){ // int num = n + SizeN*( k + SizeZ*j +SizeY*SizeZ*i); // return num; // } // // __device__ int TwoDMapD(int i,int j,int size){ // int num = j + i*size; // return num; // } //update B-field void UPDATE_B(){ // if(TEz && polar_psi==0){ // UPDATE_hx(); // UPDATE_hz(); // } // else if(TMz && polar_psi==0){ // UPDATE_hy(); // } //else{ int Number; int threadsPerBlock = 256; Number = NCELLX * NCELLY *NCELLZ; int blocksPerGrid = Number/threadsPerBlock + 1; // cudaProfilerStart() ; // // UPDATE_hx <<<blocksPerGrid, threadsPerBlock>>> (hx,ez,ey,Chxh,Chxe,psi_Hx_z_N,psi_Hx_z_F,psi_Hx_y_N,psi_Hx_y_F,khdy,khdz,bh_z_N,bh_z_F,ch_z_N,ch_z_F,bh_y_N,bh_y_F,ch_y_N,ch_y_F,NCELLX,NCELLY,NCELLZ,Periodic_XY,dx,dy,dz,dt,cpml_N_Z,cpml_F_Z,cpml_N_Y,cpml_F_Y,cpml_z_lim,cpml_y_lim,cpml_x_lim,NcpmlZ,NcpmlY); // UPDATE_hy <<<blocksPerGrid, threadsPerBlock>>> (hy,ez,ex,Chyh,Chye,psi_Hy_z_N,psi_Hy_z_F,psi_Hy_x_N,psi_Hy_x_F,khdx,khdz,bh_z_N,bh_z_F,ch_z_N,ch_z_F,bh_x_N,bh_x_F,ch_x_N,ch_x_F,NCELLX,NCELLY,NCELLZ,Periodic_XY,dx,dy,dz,dt,cpml_N_Z,cpml_F_Z,cpml_N_X,cpml_F_X,cpml_z_lim,cpml_y_lim,cpml_x_lim,NcpmlZ,NcpmlX); // UPDATE_hz <<<blocksPerGrid, threadsPerBlock>>> (hz,ey,ex,Chzh,Chze,psi_Hz_x_N,psi_Hz_x_F,psi_Hz_y_N,psi_Hz_y_F,khdx,khdy,bh_x_N,bh_x_F,ch_x_N,ch_x_F,bh_y_N,bh_y_F,ch_y_N,ch_y_F,NCELLX,NCELLY,NCELLZ,Periodic_XY,dx,dy,dz,dt,cpml_N_X,cpml_F_X,cpml_N_Y,cpml_F_Y,cpml_z_lim,cpml_y_lim,cpml_x_lim,NcpmlY,NcpmlX); cudaDeviceSynchronize(); // cudaProfilerStop() ; // UPDATE_hz(); // } } //update E-field void UPDATE_E(){ int i,j,k,n; comp hold; int Number; int threadsPerBlock = 256; Number = NCELLX * NCELLY *NCELLZ; int blocksPerGrid = Number/threadsPerBlock + 1; // // // UPDATE_ex(); // UPDATE_ex <<<blocksPerGrid, threadsPerBlock>>> (ex,ex_n,ex_n_1,hy,hz,Cexe,Cexh,kedy,kedz,mat_matrix,mat_matrixX,first_medium_max,psi_Ex_z_N,psi_Ex_z_F,psi_Ex_y_N,psi_Ex_y_F,Px_cp,Px_cp_n,Px_cp_n_1,Px_d,Px_d_n,Px_d_n_1, // C_1_cp,C_2_cp,C_3_cp,C_4_cp,C_5_cp,d_1_d,d_2_d,d_3_d,d_4_d,d_5_d,d_NL,C_E,z0,N_CP_poles,N_drude_poles,ce_z_N,ce_z_F,be_z_N,be_z_F,ce_y_N,ce_y_F,be_y_N,be_y_F,dx,dy,dz,dt,NCELLX,NCELLY,NCELLZ, // Hydrodynamics,cpml_x_lim,cpml_y_lim,cpml_z_lim,cpml_N_Y,cpml_F_Y,cpml_N_Z,cpml_F_Z,NcpmlY,NcpmlZ, C_E_1,C_E_2,Periodic_XY); // //UPDATE_ey(); // UPDATE_ey <<<blocksPerGrid, threadsPerBlock>>> (ey,ey_n,ey_n_1,hx,hz,Ceye,Ceyh,kedx,kedz,mat_matrix,mat_matrixY,first_medium_max,psi_Ey_z_N,psi_Ey_z_F,psi_Ey_x_N,psi_Ey_x_F,Py_cp,Py_cp_n,Py_cp_n_1,Py_d,Py_d_n,Py_d_n_1, // C_1_cp,C_2_cp,C_3_cp,C_4_cp,C_5_cp,d_1_d,d_2_d,d_3_d,d_4_d,d_5_d,d_NL,C_E,z0,N_CP_poles,N_drude_poles,ce_z_N,ce_z_F,be_z_N,be_z_F,ce_y_N,ce_x_F,be_x_N,be_x_F,dx,dy,dz,dt,NCELLX,NCELLY,NCELLZ, // Hydrodynamics,cpml_x_lim,cpml_y_lim,cpml_z_lim,cpml_N_X,cpml_F_X,cpml_N_Z,cpml_F_Z,NcpmlX,NcpmlZ, C_E_1,C_E_2,Periodic_XY); // // // UPDATE_ez(); // UPDATE_ez <<<blocksPerGrid, threadsPerBlock>>> 
(ez,ez_n,ez_n_1,hx,hy,Ceze,Cezh,kedx,kedy,mat_matrix,mat_matrixZ,first_medium_max,psi_Ez_y_N,psi_Ez_y_F,psi_Ez_x_N,psi_Ez_x_F,Pz_cp,Pz_cp_n,Pz_cp_n_1,Pz_d,Pz_d_n,Pz_d_n_1, // C_1_cp,C_2_cp,C_3_cp,C_4_cp,C_5_cp,d_1_d,d_2_d,d_3_d,d_4_d,d_5_d,d_NL,C_E,z0,N_CP_poles,N_drude_poles,ce_y_N,ce_y_F,be_y_N,be_y_F,ce_x_N,ce_x_F,be_x_N,be_x_F,dx,dy,dz,dt,NCELLX,NCELLY,NCELLZ, // Hydrodynamics,cpml_x_lim,cpml_y_lim,cpml_z_lim,cpml_N_X,cpml_F_X,cpml_N_Y,cpml_F_Y,NcpmlX,NcpmlY, C_E_1,C_E_2,Periodic_XY); // cudaDeviceSynchronize(); // for(i=0;i<NCELLX;i++){ // for(j=0;j<NCELLY;j++){ // printf("%e\t",Px_d[i][j][dispersive_slab+20][0]); // //printf("%e\t",hx[i][j][dispersive_slab+20]); // } // printf("\n"); // } ////#pragma omp parallel for collapse(3) private(i,j,k,n) //// schedule(guided) for(i=0;i<NCELLX;i++){ for(j=0;j<NCELLY;j++){ for(k=0;k<NCELLZ;k++){ if(mat_matrixX[ThreeDMap(i,j,k,NCELLZ,NCELLY)] < 6 || mat_matrixY[ThreeDMap(i,j,k,NCELLZ,NCELLY)] <6 || mat_matrixZ[ThreeDMap(i,j,k,NCELLZ,NCELLY)] <6 || mat_matrix[ThreeDMap(i,j,k,NCELLZ,NCELLY)] <6){ for(n=0;n<N_drude_poles;n++){ Px_d_n_1[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)] = Px_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; Px_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)] = Px_d[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; Py_d_n_1[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)] = Py_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; Py_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)] = Py_d[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; Pz_d_n_1[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)] = Pz_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; Pz_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)] = Pz_d[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; } for(n=0;n<N_CP_poles;n++){ Px_cp_n_1[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)] = Px_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; Px_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)] = Px_cp[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; Py_cp_n_1[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)] = Py_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; Py_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)] = Py_cp[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; Pz_cp_n_1[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)] = Pz_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; Pz_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)] = Pz_cp[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; } if(Hydrodynamics){ hxPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] = hx[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; hyPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] = hy[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; hzPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] = hz[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; hold = NDx[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; NDx[ThreeDMap(i,j,k,NCELLZ,NCELLY)] = NDx_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; NDx_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=hold; hold = NDy[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; NDy[ThreeDMap(i,j,k,NCELLZ,NCELLY)] = NDy_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; NDy_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=hold; hold = NDz[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; NDz[ThreeDMap(i,j,k,NCELLZ,NCELLY)] = NDz_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; NDz_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=hold; } } } } } } // // __global__ void UPDATE_ex(real *ex,real *ex_n,real *ex_n_1,real *hy,real *hz,real *Cexe,real *Cexh,real *kedy,real *kedz,int *mat_matrix,int *mat_matrixX,int first_medium_max,real *psi_Ex_z_N, // real *psi_Ex_z_F,real *psi_Ex_y_N,real *psi_Ex_y_F,real *Px_cp,real *Px_cp_n,real *Px_cp_n_1,real *Px_d,real *Px_d_n,real *Px_d_n_1,real *C_1_cp,real 
*C_2_cp,real *C_3_cp,real *C_4_cp,real *C_5_cp,real *d_1_d, // real *d_2_d,real *d_3_d,real *d_4_d,real *d_5_d,real *d_NL,real C_E,real z0,int N_CP_poles,int N_drude_poles,real *ce_z_N,real *ce_z_F,real *be_z_N,real *be_z_F,real *ce_y_N,real *ce_y_F,real *be_y_N,real *be_y_F, // real dx,real dy,real dz,real dt,int NCELLX,int NCELLY,int NCELLZ,int Hydrodynamics,int cpml_x_lim,int cpml_y_lim,int cpml_z_lim,int cpml_N_Y,int cpml_F_Y,int cpml_N_Z,int cpml_F_Z,int NcpmlY,int NcpmlZ,real C_E_1,real C_E_2,int Periodic_XY){ // int i,j,k,n,k2,j2; // comp Curl_H, Div_Grad=0.0,J_T,dummy_var; // comp Vx1,Vx2,Vy1,Vy2,Vz1,Vz2,Nx1,Nx2,Ny1,Ny2,Nz1,Nz2; // comp C_P_1,C_P_2,C_P_3,C_P_4,C_P_NL; // double INV_DX = 1.0/dx; // double INV_DY = 1.0/dy; // double INV_DZ = 1.0/dz; // // int idx = blockDim.x * blockIdx.x + threadIdx.x; // // i = idx / (NCELLZ*NCELLY); // j = (idx - i*NCELLZ*NCELLY) / NCELLZ; // k = idx - i*NCELLZ*NCELLY - j*NCELLZ; // // if(Periodic_XY){ // ////#pragma omp parallel for collapse(3) private(Curl_H,i,j,j2,k,k2,n,dummy_var,J_T,Div_Grad) // schedule(static) // // for(k=1;k<NCELLZ-1;k++){ // // for(i=0;i<NCELLX;i++){ // // for(j=0;j<NCELLY;j++){ // if(i<NCELLX && j<NCELLY && k>0 && k<(NCELLZ-1)){ // //for(k=1;k<NCELLZ-1;k++){ // if(j==0){ // #ifdef DOUBLECOMPLEX // Curl_H=(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i,NCELLY-1,k,NCELLZ,NCELLY)]*cexp(I*k_y*period_y))/kedy[j]-(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/kedz[k]; // #endif // #ifdef DOUBLEPRECISION // Curl_H=(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i,NCELLY-1,k,NCELLZ,NCELLY)])/kedy[j]-(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/kedz[k]; // #endif // // } // else{ // Curl_H=(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/kedy[j]-(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/kedz[k]; // } // // if(mat_matrix[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max && mat_matrix[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] < 6){ // // // // Div_Grad = Calc_DIV_GRADx(i,j,k); // Div_Grad = 0.0; // // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // // for(n=0;n<N_drude_poles;n++){ // C_P_1+=(d_1_d[n]-1)*Px_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_3+=(d_2_d[n])*Px_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_NL += d_NL[n]*Div_Grad; // } // for(n=0;n<N_CP_poles;n++){ // C_P_2+=(C_1_cp[n]-1)*Px_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // C_P_4+=(C_2_cp[n])*Px_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // } // ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_E_2*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4 - C_P_NL); // // // //printf("%e\n",Div_Grad); // //Z-CPML // if(k<cpml_N_Z+1 && i<cpml_x_lim && j<cpml_y_lim){ // //Near-Z-PML // psi_Ex_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]=be_z_N[k]*psi_Ex_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]+ce_z_N[k]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ex_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]; // } // if(k>=cpml_F_Z && i<cpml_x_lim && j<cpml_y_lim){ // k2 = k - cpml_F_Z ; // 
psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]=be_z_F[k2]*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]+ce_z_F[k2]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=(1/C_E)*dt*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]/z0; // } // // for(n=0;n<N_CP_poles;n++){ // Px_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Px_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Px_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // // for(n=0;n<N_drude_poles;n++){ // Px_d[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]=d_1_d[n]*Px_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_2_d[n]*Px_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_3_d[n]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_4_d[n]*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_5_d[n]*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] + d_NL[n]*Div_Grad; // // } // // } // // else{ // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Cexe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_H; // //Z-CPML // if(k<cpml_N_Z+1 && i<cpml_x_lim && j<cpml_y_lim){ // //Near-Z-PML // psi_Ex_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=be_z_N[k]*psi_Ex_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+ce_z_N[k]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ex_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // if(k>=cpml_F_Z && i<cpml_x_lim && j<cpml_y_lim){ // k2 = k - cpml_F_Z ; // psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]=be_z_F[k2]*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]+ce_z_F[k2]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]; // } // } // } // // // // // } // // } // // } // } // //No PBCs // else{ // ////#pragma omp target teams distribute parallel for collapse(3) schedule(static,1) private(Curl_H,i,j,j2,k,k2,n,dummy_var,J_T,Div_Grad) // // //#pragma omp parallel for collapse(3) private(Curl_H,i,j,j2,k,k2,n,dummy_var,J_T,Div_Grad) // schedule(static) // // for(i=0;i<NCELLX-1;i++){ // // for(j=1;j<NCELLY-1;j++){ // // for(k=1;k<NCELLZ-1;k++){ // if(i<(NCELLX-1) && j>0 && j<(NCELLY-1) && k>0 && k<(NCELLZ-1)){ // Curl_H=(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/kedy[j]-(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/kedz[k]; // // if(mat_matrixX[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max && mat_matrixX[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] < 6){ // // // if(Hydrodynamics == 0) // { // //Div_Grad = Calc_DIV_GRADx(i,j,k); // Div_Grad = 0.0; // // CP_D_ex(i,j,k,Curl_H,Div_Grad); // // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // // for(n=0;n<N_drude_poles;n++){ // C_P_1+=(d_1_d[n]-1)*Px_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_3+=(d_2_d[n])*Px_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_NL += d_NL[n]*Div_Grad; // } // for(n=0;n<N_CP_poles;n++){ // C_P_2+=(C_1_cp[n]-1)*Px_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // C_P_4+=(C_2_cp[n])*Px_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // } // ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // 
ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_E_2*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4 - C_P_NL); // // // // // for(n=0;n<N_CP_poles;n++){ // Px_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Px_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Px_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // // for(n=0;n<N_drude_poles;n++){ // Px_d[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]=d_1_d[n]*Px_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_2_d[n]*Px_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_3_d[n]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_4_d[n]*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_5_d[n]*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] + d_NL[n]*Div_Grad; // // } // } // // else{ // // // // Vx1 = Px_d_n[FourDMapD(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]; // // Vx2 = Px_d_n[FourDMapD(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]; // // Nx1 = NDx_prev[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]; // // Nx2 = NDx_prev[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)]; // // // // Vy1 = 0.5*(Py_d_n[FourDMapD(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vy2 = 0.5*(Py_d_n[FourDMapD(i+1,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMapD(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Ny1 = 0.5*(NDy_prev[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)] + NDy_prev[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]); // // Ny2 = 0.5*(NDy_prev[ThreeDMapD(i+1,j-1,k,NCELLZ,NCELLY)] + NDy_prev[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)]); // // // // Vz1 = 0.5*(Pz_d_n[FourDMapD(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vz2 = 0.5*(Pz_d_n[FourDMapD(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMapD(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); // // Nz1 = 0.5*(NDz_prev[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)] + NDz_prev[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]); // // Nz2 = 0.5*(NDz_prev[ThreeDMapD(i+1,j,k-1,NCELLZ,NCELLY)] + NDz_prev[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)]); // // // // NDx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] = NDx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] - 2.0*dt*INV_DX*(0.5*(Nx1*Vx1-Nx2*Vx2) + (Ny1*Vy1-Ny2*Vy2) + (Nz1*Vz1-Nz2*Vz2) + (0.5*(Vx1-Vx2) + (Vy1-Vy2) + (Vz1-Vz2))*N_EQ); // // // // // // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // // // // // // for(n=0;n<N_CP_poles;n++){ // // C_P_2+=(C_1_cp[n]-1)*Px_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // // C_P_4+=(C_2_cp[n])*Px_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // // } // // ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0 + dt*Px_d[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*e0*(N_EQ + NDx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]) -C_E_2*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_2-C_P_4); // // // // for(n=0;n<N_CP_poles;n++){ // // Px_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Px_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Px_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // 
// // // // // } // // // printf("%e\n",Div_Grad); // //Z-CPML // // if(k<cpml_N_Z+1 && i<cpml_x_lim && j<cpml_y_lim){ // // //Near-Z-PML // // psi_Ex_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=be_z_N[k]*psi_Ex_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+ce_z_N[k]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ex_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // // if(k>=cpml_F_Z && i<cpml_x_lim && j<cpml_y_lim){ // // k2 = k - cpml_F_Z ; // // psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]=be_z_F[k2]*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]+ce_z_F[k2]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=(1/C_E)*dt*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]/z0; // // } // // // // } // // else{ // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Cexe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_H; // } // //Z-CPML // if(k<cpml_N_Z+1 && i<cpml_x_lim && j<cpml_y_lim){ // //Near-Z-PML // psi_Ex_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]=be_z_N[k]*psi_Ex_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]+ce_z_N[k]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ex_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]; // } // if(k>=cpml_F_Z && i<cpml_x_lim && j<cpml_y_lim){ //Far Z PML // k2 = k - cpml_F_Z; // psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]=be_z_F[k2]*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]+ce_z_F[k2]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // // if(mat_matrixX[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max){ // // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=(1/C_E)*dt*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]/z0; // // } // // else{ // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ex_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]; // //} // } // //Y PML // if(j<cpml_N_Y+1 && i<cpml_x_lim && k<cpml_z_lim){ //Near Y PML // psi_Ex_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY+1)]=be_y_N[j]*psi_Ex_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY+1)]+ce_y_N[j]*(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/dy; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ex_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY+1)]; // } // if(j>=cpml_F_Y && i<cpml_x_lim && k<cpml_z_lim){ //Far Y PML // j2 = j - cpml_F_Y; // psi_Ex_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY+1)]=be_y_F[j2]*psi_Ex_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY+1)]+ce_y_F[j2]*(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/dy; // ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Cexh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ex_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY+1)]; // } // // if(mat_matrixX[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max){ // // for(n=0;n<N_CP_poles;n++){ // // Px_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Px_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Px_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // // for(n=0;n<N_drude_poles;n++){ // // 
Px_d[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=d_1_d[n]*Px_d_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+d_2_d[n]*Px_d_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+d_3_d[n]*ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_4_d[n]*ex_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_5_d[n]*ex_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // // } // } // // } // // } // // } // } // // return; // } // // // // // // // __global__ void UPDATE_ey(real *ey,real *ey_n,real *ey_n_1,real *hx,real *hz,real *Ceye,real *Ceyh,real *kedx,real *kedz,int *mat_matrix,int *mat_matrixY,int first_medium_max,real *psi_Ey_z_N, // real *psi_Ey_z_F,real *psi_Ey_x_N,real *psi_Ey_x_F,real *Py_cp,real *Py_cp_n,real *Py_cp_n_1,real *Py_d,real *Py_d_n,real *Py_d_n_1,real *C_1_cp,real *C_2_cp,real *C_3_cp,real *C_4_cp,real *C_5_cp,real *d_1_d, // real *d_2_d,real *d_3_d,real *d_4_d,real *d_5_d,real *d_NL,real C_E,real z0,int N_CP_poles,int N_drude_poles,real *ce_z_N,real *ce_z_F,real *be_z_N,real *be_z_F,real *ce_x_N,real *ce_x_F,real *be_x_N,real *be_x_F, // real dx,real dy,real dz,real dt,int NCELLX,int NCELLY,int NCELLZ,int Hydrodynamics,int cpml_x_lim,int cpml_y_lim,int cpml_z_lim,int cpml_N_X,int cpml_F_X,int cpml_N_Z,int cpml_F_Z,int NcpmlX,int NcpmlZ,real C_E_1,real C_E_2,int Periodic_XY){ // int i,j,k,i2,k2,n; // comp Curl_H,Div_Grad=0.0,dummy_var,J_T; // comp C_P_1,C_P_2,C_P_3,C_P_4,C_P_NL; // double INV_DX = 1.0/dx; // double INV_DY = 1.0/dy; // double INV_DZ = 1.0/dz; // comp Vx1,Vx2,Vy1,Vy2,Vz1,Vz2,Nx1,Nx2,Ny1,Ny2,Nz1,Nz2; // int idx = blockDim.x * blockIdx.x + threadIdx.x; // // i = idx / (NCELLZ*NCELLY); // j = (idx - i*NCELLZ*NCELLY) / NCELLZ; // k = idx - i*NCELLZ*NCELLY - j*NCELLZ; // // if(Periodic_XY){ // ////#pragma omp parallel for collapse(3) private(Curl_H,i,j,i2,k,k2,n,dummy_var,J_T,Div_Grad) // schedule(static) // // for(k=0;k<NCELLZ-1;k++){ // // for(i=0;i<NCELLX;i++){ // // for(j=0;j<NCELLY;j++){ // if(i<NCELLX && j<NCELLY && k>0 && k<(NCELLZ-1)){ // // for(k=1;k<NCELLZ-1;k++){ // if(i==0){ // #ifdef DOUBLECOMPLEX // Curl_H=(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/kedz[k]-(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(NCELLX-1,j,k,NCELLZ,NCELLY)]*cexp(I*period_x*k_x))/kedx[i]; // #endif // #ifdef DOUBLEPRECISION // Curl_H=(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/kedz[k]-(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(NCELLX-1,j,k,NCELLZ,NCELLY)])/kedx[i]; // #endif // //printf("%d,%d,%d \t %f\t%f\t%f\n",i,j,k,creal(Curl_H),creal(hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)]), creal(hz[ThreeDMapD(NCELLX-1,j,k,NCELLZ,NCELLY)])); // // } // // else{ // Curl_H=(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/kedz[k]-(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/kedx[i]; // } // if(mat_matrixY[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max && mat_matrixY[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] <6){ // // //Div_Grad = Calc_DIV_GRADy(i,j,k); // Div_Grad = 0.0; // // printf("%e\n",d_NL[0]*Div_Grad); // // CP_D_ey(i,j,k,Curl_H,Div_Grad); // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // //printf("here"); // for(n=0;n<N_drude_poles;n++){ // C_P_1+=(d_1_d[n]-1.0)*Py_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_3+=(d_2_d[n])*Py_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_NL += d_NL[n]*Div_Grad; // } // for(n=0;n<N_CP_poles;n++){ // C_P_2+=(C_1_cp[n]-1.0)*Py_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // 
C_P_4+=(C_2_cp[n])*Py_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // } // ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_E_2*ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4-C_P_NL); // // // // printf("%e\n",ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]); // //Z-CPML // if(k<cpml_N_Z+1 && i<cpml_x_lim && j<cpml_y_lim){ // //Here we are in the near Z-PML // psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]=be_z_N[k]*psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]+ce_z_N[k]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]; // } // if(k>=cpml_F_Z && i<cpml_x_lim && j<cpml_y_lim){ // //Here we are in the far Z-PML // k2 = k - cpml_F_Z ; // psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]=be_z_F[k2]*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]+ce_z_F[k2]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=(1/C_E)*dt*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]/z0; // } // // for(n=0;n<N_CP_poles;n++){ // Py_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Py_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Py_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // // //if(Hydrodynamics == 0){ // for(n=0;n<N_drude_poles;n++){ // Py_d[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]=d_1_d[n]*Py_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_2_d[n]*Py_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_3_d[n]*ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_4_d[n]*ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_5_d[n]*ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] + d_NL[n]*Div_Grad; // // } // //} // // } // // else{ // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Ceye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_H; // //Z-CPML // if(k<cpml_N_Z+1 && i<cpml_x_lim && j<cpml_y_lim){ // //Here we are in the near Z-PML // psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]=be_z_N[k]*psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]+ce_z_N[k]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]; // } // if(k>=cpml_F_Z && i<cpml_x_lim && j<cpml_y_lim){ // //Here we are in the far Z-PML // k2 = k - cpml_F_Z ; // psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]=be_z_F[k2]*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]+ce_z_F[k2]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // if(mat_matrix[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max){ // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=(1/C_E)*dt*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]/z0; // } // else{ // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]; // } // } // } // // } // // // } // // } // // } // } // // else{ // // //#pragma omp parallel for collapse(3) private(Curl_H,i,j,i2,k,k2,n,dummy_var,J_T,Div_Grad) // schedule(static) // // 
for(i=1;i<NCELLX-1;i++){ // // for(j=0;j<NCELLY-1;j++){ // // for(k=1;k<NCELLZ-1;k++){ // if(i>0 && i<(NCELLX-1) && j<(NCELLY-1) && k>0 && k<(NCELLZ-1)){ // Curl_H=(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/kedz[k]-(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/kedx[i]; // if(mat_matrixY[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max && mat_matrixY[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] <6){ // // if(Hydrodynamics == 0){ // //Div_Grad = Calc_DIV_GRADy(i,j,k); // Div_Grad= 0.0; // // printf("%e\n",d_NL[0]*Div_Grad); // // CP_D_ey(i,j,k,Curl_H,Div_Grad); // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // //printf("here"); // for(n=0;n<N_drude_poles;n++){ // C_P_1+=(d_1_d[n]-1.0)*Py_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_3+=(d_2_d[n])*Py_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_NL += d_NL[n]*Div_Grad; // } // for(n=0;n<N_CP_poles;n++){ // C_P_2+=(C_1_cp[n]-1.0)*Py_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // C_P_4+=(C_2_cp[n])*Py_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // } // ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_E_2*ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4-C_P_NL); // // // printf("%e\n",ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]); // //Z-CPML // // if(k<cpml_N_Z+1 && i<cpml_x_lim && j<cpml_y_lim){ // // //Here we are in the near Z-PML // // psi_Ey_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=be_z_N[k]*psi_Ey_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+ce_z_N[k]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ey_z_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // // if(k>=cpml_F_Z && i<cpml_x_lim && j<cpml_y_lim){ // // //Here we are in the far Z-PML // // k2 = k - cpml_F_Z ; // // psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]=be_z_F[k2]*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]+ce_z_F[k2]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=(1/C_E)*dt*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]/z0; // // } // // for(n=0;n<N_CP_poles;n++){ // Py_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Py_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Py_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // // for(n=0;n<N_drude_poles;n++){ // Py_d[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]=d_1_d[n]*Py_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_2_d[n]*Py_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_3_d[n]*ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_4_d[n]*ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_5_d[n]*ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] + d_NL[n]*Div_Grad; // // } // // } // // else{ // // // // Vy1 = Py_d_n[FourDMapD(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]; // // Vy2 = Py_d_n[FourDMapD(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]; // // Ny1 = NDy_prev[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]; // // Ny2 = NDy_prev[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)]; // // // // Vx1 = 0.5*(Px_d_n[FourDMapD(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // 
Vx2 = 0.5*(Px_d_n[FourDMapD(i-1,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMapD(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Nx1 = 0.5*(NDx_prev[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)] + NDx_prev[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]); // // Nx2 = 0.5*(NDx_prev[ThreeDMapD(i-1,j+1,k,NCELLZ,NCELLY)] + NDx_prev[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)]); // // // // Vz1 = 0.5*(Pz_d_n[FourDMapD(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vz2 = 0.5*(Pz_d_n[FourDMapD(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMapD(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); // // Nz1 = 0.5*(NDz_prev[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)] + NDz_prev[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]); // // Nz2 = 0.5*(NDz_prev[ThreeDMapD(i,j+1,k-1,NCELLZ,NCELLY)] + NDz_prev[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)]); // // // // NDy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] = NDy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] - 2.0*dt*INV_DX*((Nx1*Vx1-Nx2*Vx2) + 0.5*(Ny1*Vy1-Ny2*Vy2) + (Nz1*Vz1-Nz2*Vz2) + ((Vx1-Vx2) + 0.5*(Vy1-Vy2) + (Vz1-Vz2))*N_EQ); // // // // // // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // // // // for(n=0;n<N_CP_poles;n++){ // // C_P_2+=(C_1_cp[n]-1.0)*Py_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // // C_P_4+=(C_2_cp[n])*Py_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // // } // // ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0 + dt*Py_d[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*e0*(N_EQ + NDy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]) + C_E_1*ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_E_2*ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_2-C_P_4); // // for(n=0;n<N_CP_poles;n++){ // // Py_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Py_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Py_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ey_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ey_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // // // // } // } // // // else{ // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Ceye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_H; // // } // // if(k<cpml_N_Z+1 && i<cpml_x_lim && j<cpml_y_lim){ // //Here we are in the near Z-PML // psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]=be_z_N[k]*psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]+ce_z_N[k]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ey_z_N[ThreeDMapD(i,j,k,NcpmlZ+1,NCELLY)]; // } // if(k>=cpml_F_Z && i<cpml_x_lim && j<cpml_y_lim){ // //Here we are in the far Z-PML // k2 = k - cpml_F_Z; // psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]=be_z_F[k2]*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]+ce_z_F[k2]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)])/dz; // // if(mat_matrix[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max){ // // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=(1/C_E)*dt*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]/z0; // // } // // else{ // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ey_z_F[ThreeDMapD(i,j,k2,NcpmlZ+1,NCELLY)]; // //} // } // //X-CPML // if(i<cpml_N_X+1 && j<cpml_y_lim && k<cpml_z_lim){ // //Here we are in the near-X-PML // 
psi_Ey_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=be_x_N[i]*psi_Ey_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+ce_x_N[i]*(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/dx; // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ey_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // if(i>=cpml_F_X && j<cpml_y_lim && k<cpml_z_lim){ // //Here we are in the far-X-PML // i2 = i - cpml_F_X; // psi_Ey_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]=be_x_F[i2]*psi_Ey_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]+ce_x_F[i2]*(hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hz[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/dx; // ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Ceyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ey_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]; // } // // // } // // } // // } // // // // } // // } // // return; // } // // // //void UPDATE_ez(void){ // __global__ void UPDATE_ez(real *ez,real *ez_n,real *ez_n_1,real *hx,real *hy,real *Ceze,real *Cezh,real *kedx,real *kedy,int *mat_matrix,int *mat_matrixZ,int first_medium_max,real *psi_Ez_y_N, // real *psi_Ez_y_F,real *psi_Ez_x_N,real *psi_Ez_x_F,real *Pz_cp,real *Pz_cp_n,real *Pz_cp_n_1,real *Pz_d,real *Pz_d_n,real *Pz_d_n_1,real *C_1_cp,real *C_2_cp,real *C_3_cp,real *C_4_cp,real *C_5_cp,real *d_1_d, // real *d_2_d,real *d_3_d,real *d_4_d,real *d_5_d,real *d_NL,real C_E,real z0,int N_CP_poles,int N_drude_poles,real *ce_y_N,real *ce_y_F,real *be_y_N,real *be_y_F,real *ce_x_N,real *ce_x_F,real *be_x_N,real *be_x_F, // real dx,real dy,real dz,real dt,int NCELLX,int NCELLY,int NCELLZ,int Hydrodynamics,int cpml_x_lim,int cpml_y_lim,int cpml_z_lim,int cpml_N_X,int cpml_F_X,int cpml_N_Y,int cpml_F_Y,int NcpmlX,int NcpmlY,real C_E_1,real C_E_2,int Periodic_XY){ // // int i,j,k,i2,j2,n; // comp Curl_H,Div_Grad=0.0,dummy_var,J_T; // comp C_P_1,C_P_2,C_P_3,C_P_4,C_P_NL; // comp Vx1,Vx2,Vy1,Vy2,Vz1,Vz2,Nx1,Nx2,Ny1,Ny2,Nz1,Nz2; // // double INV_DX = 1.0/dx; // double INV_DY = 1.0/dy; // double INV_DZ = 1.0/dz; // int idx = blockDim.x * blockIdx.x + threadIdx.x; // // i = idx / (NCELLZ*NCELLY); // j = (idx - i*NCELLZ*NCELLY) / NCELLZ; // k = idx - i*NCELLZ*NCELLY - j*NCELLZ; // // if(Periodic_XY){ // // //#pragma omp parallel for collapse(3) private(Curl_H,i,j,k,i2,j2,n,dummy_var,J_T,Div_Grad) // schedule(static) // // for(k=0;k<NCELLZ-1;k++){ // // for(i=0;i<NCELLX;i++){ // // for(j=0;j<NCELLY;j++){ // if(i<NCELLX,j<NCELLY,k<NCELLZ){ // // for(k=0;k<NCELLZ-1;k++){ // //printf("Thread %d, ready to work\n",omp_get_thread_num()); // // if(i==0 || j==0){ // if(i==0 && j==0){ // #ifdef DOUBLECOMPLEX // Curl_H=(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(NCELLX-1,j,k,NCELLZ,NCELLY)]*cexp(I*k_x*period_x))/kedx[i]-(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,NCELLY-1,k,NCELLZ,NCELLY)]*cexp(I*k_y*period_y))/kedy[j]; // #endif // #ifdef DOUBLEPRECISION // Curl_H=(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(NCELLX-1,j,k,NCELLZ,NCELLY)])/kedx[i]-(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,NCELLY-1,k,NCELLZ,NCELLY)])/kedy[j]; // #endif // // // } // else if(i==0){ // #ifdef DOUBLECOMPLEX // Curl_H=(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(NCELLX-1,j,k,NCELLZ,NCELLY)]*cexp(I*k_x*period_x))/kedx[i]-(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/kedy[j]; // #endif // #ifdef DOUBLEPRECISION // Curl_H=(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(NCELLX-1,j,k,NCELLZ,NCELLY)])/kedx[i]-(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/kedy[j]; // #endif // // // } // else{ // #ifdef 
DOUBLECOMPLEX // Curl_H=(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/kedx[i]-(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,NCELLY-1,k,NCELLZ,NCELLY)]*cexp(I*k_y*period_y))/kedy[j]; // #endif // #ifdef DOUBLEPRECISION // Curl_H=(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/kedx[i]-(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,NCELLY-1,k,NCELLZ,NCELLY)])/kedy[j]; // #endif // // // } // } // // else{ // Curl_H=(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/kedx[i]-(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/kedy[j]; // // } // if(mat_matrix[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max && mat_matrix[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]<6){ // // // Div_Grad = Calc_DIV_GRADz(i,j,k); // Div_Grad = 0.0; // // // CP_D_ez(i,j,k,Curl_H,Div_Grad); // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // // for(n=0;n<N_drude_poles;n++){ // C_P_1+=(d_1_d[n]-1)*Pz_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_3+=(d_2_d[n])*Pz_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_NL += d_NL[n]*Div_Grad; // } // for(n=0;n<N_CP_poles;n++){ // C_P_2+=(C_1_cp[n]-1)*Pz_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // C_P_4+=(C_2_cp[n])*Pz_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // } // ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_E_2*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4-C_P_NL); // // // for(n=0;n<N_CP_poles;n++){ // Pz_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Pz_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Pz_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // // for(n=0;n<N_drude_poles;n++){ // Pz_d[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]=d_1_d[n]*Pz_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_2_d[n]*Pz_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_3_d[n]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_4_d[n]*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_5_d[n]*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] + d_NL[n]*Div_Grad; // } // } // // else{ // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Ceze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Cezh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_H; // } // // } // // } // // } // // } // } // // else{ // // //#pragma omp parallel for collapse(3) private(Curl_H,i,j,k,i2,j2,n,dummy_var,J_T,Div_Grad) // schedule(static) // // for(i=1;i<NCELLX-1;i++){ // // for(j=1;j<NCELLY-1;j++){ // // for(k=0;k<NCELLZ-1;k++){ // if(i>0 && i<(NCELLX-1) && j>0 && j<(NCELLY-1) && k<(NCELLZ-1)){ // Curl_H=(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/kedx[i]-(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/kedy[j]; // if(mat_matrixZ[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max && mat_matrixZ[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]<6){ // // if(Hydrodynamics == 0){ // //Div_Grad = Calc_DIV_GRADz(i,j,k); // Div_Grad = 0.0; // // CP_D_ez(i,j,k,Curl_H,Div_Grad); // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // // for(n=0;n<N_drude_poles;n++){ // C_P_1+=(d_1_d[n]-1)*Pz_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // 
C_P_3+=(d_2_d[n])*Pz_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; // C_P_NL += d_NL[n]*Div_Grad; // } // for(n=0;n<N_CP_poles;n++){ // C_P_2+=(C_1_cp[n]-1)*Pz_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // C_P_4+=(C_2_cp[n])*Pz_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // } // ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_E_2*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4-C_P_NL); // // // for(n=0;n<N_CP_poles;n++){ // Pz_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Pz_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Pz_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // for(n=0;n<N_drude_poles;n++){ // Pz_d[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]=d_1_d[n]*Pz_d_n[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_2_d[n]*Pz_d_n_1[FourDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_3_d[n]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_4_d[n]*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_5_d[n]*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] + d_NL[n]*Div_Grad; // } // // } // // else{ // // // // C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; // // // // Vz1 = Pz_d_n[FourDMapD(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]; // // Vz2 = Pz_d_n[FourDMapD(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]; // // Nz1 = NDz_prev[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]; // // Nz2 = NDz_prev[ThreeDMapD(i,j,k-1,NCELLZ,NCELLY)]; // // // // Vx1 = 0.5*(Px_d_n[FourDMapD(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vx2 = 0.5*(Px_d_n[FourDMapD(i-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMapD(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Nx1 = 0.5*(NDx_prev[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)] + NDx_prev[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]); // // Nx2 = 0.5*(NDx_prev[ThreeDMapD(i-1,j,k+1,NCELLZ,NCELLY)] + NDx_prev[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)]); // // // // Vy1 = 0.5*(Py_d_n[FourDMapD(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vy2 = 0.5*(Py_d_n[FourDMapD(i,j-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMapD(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Ny1 = 0.5*(NDy_prev[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)] + NDy_prev[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]); // // Ny2 = 0.5*(NDy_prev[ThreeDMapD(i,j-1,k+1,NCELLZ,NCELLY)] + NDy_prev[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)]); // // // // NDz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] = NDz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] - 2.0*dt*INV_DX*((Nx1*Vx1-Nx2*Vx2) + (Ny1*Vy1-Ny2*Vy2) + 0.5*(Vz1-Vz2) + ((Vx1-Vx2) + (Vy1-Vy2) + 0.5*(Vz1-Vz2))*N_EQ); // // // // // // for(n=0;n<N_CP_poles;n++){ // // C_P_2+=(C_1_cp[n]-1)*Pz_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // // C_P_4+=(C_2_cp[n])*Pz_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; // // } // // ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0 + dt*Pz_d[FourDMapD(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*e0*(NDz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] + 
N_EQ)+C_E_1*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_E_2*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-C_P_2-C_P_4); // // // // for(n=0;n<N_CP_poles;n++){ // // Pz_cp[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Pz_cp_n[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Pz_cp_n_1[FourDMapD(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // // } // } // // // else{ // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Ceze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Cezh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_H; // } // //Y CPML // if(j<cpml_N_Y && i<cpml_x_lim && k<cpml_z_lim){ //Near Y PML // psi_Ez_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY+1)]=be_y_N[j]*psi_Ez_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY+1)]+ce_y_N[j]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/dy; // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Cezh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ez_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY+1)]; // } // if(j>=cpml_F_Y && i<cpml_x_lim && k<cpml_z_lim){ // j2 = j - cpml_F_Y; // psi_Ez_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY+1)]=be_y_F[j2]*psi_Ez_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY+1)]+ce_y_F[j2]*(hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hx[ThreeDMapD(i,j-1,k,NCELLZ,NCELLY)])/dy; // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Cezh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ez_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY+1)]; // } // //X PML // if(i<cpml_N_X+1 && j<cpml_y_lim && k<cpml_z_lim){//Near X-PML // psi_Ez_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=be_x_N[i]*psi_Ez_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+ce_x_N[i]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/dx; // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Cezh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ez_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // if(i>=cpml_F_X && j<cpml_y_lim && k<cpml_z_lim){//far X-PML // i2 = i - cpml_F_X; // psi_Ez_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]=be_x_F[i2]*psi_Ez_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]+ce_x_F[i2]*(hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-hy[ThreeDMapD(i-1,j,k,NCELLZ,NCELLY)])/dx; // ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Cezh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Ez_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]; // } // // if(mat_matrix[ThreeDMapD(i,j,k,NCELLZ,NCELLY)] > first_medium_max){ // // for(n=0;n<N_CP_poles;n++){ // // Pz_cp[ThreeDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]=C_1_cp[n]*Pz_cp_n[ThreeDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+C_2_cp[n]*Pz_cp_n_1[ThreeDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+C_3_cp[n]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_4_cp[n]*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+C_5_cp[n]*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // // for(n=0;n<N_drude_poles;n++){ // // Pz_d[ThreeDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]=d_1_d[n]*Pz_d_n[ThreeDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_2_d[n]*Pz_d_n_1[ThreeDMapD(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]+d_3_d[n]*ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_4_d[n]*ez_n[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+d_5_d[n]*ez_n_1[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // // } // // } // // } // // } // // } // // // // } // } // // return; // } // void CP_D_ex(int i,int j, int k, comp Curl_H,comp Div_Grad){ int n; comp C_P_1,C_P_2,C_P_3,C_P_4,C_P_NL; C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; for(n=0;n<N_drude_poles;n++){ C_P_1+=(d_1_d[n]-1)*Px_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; C_P_3+=(d_2_d[n])*Px_d_n_1[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; C_P_NL += d_NL[n]*Div_Grad; } 
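// accumulate the critical-point (CP) pole history contributions (previous and
// second-previous Px_cp values) before forming the updated ex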
for(n=0;n<N_CP_poles;n++){ C_P_2+=(C_1_cp[n]-1)*Px_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; C_P_4+=(C_2_cp[n])*Px_cp_n_1[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; } ex_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=ex_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; ex_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=ex[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; ex[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ex_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-C_E_2*ex_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4 - C_P_NL); } void CP_D_ey(int i,int j, int k, comp Curl_H,comp Div_Grad ){ int n; comp C_P_1,C_P_2,C_P_3,C_P_4,C_P_NL; C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; //printf("here"); for(n=0;n<N_drude_poles;n++){ C_P_1+=(d_1_d[n]-1.0)*Py_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; C_P_3+=(d_2_d[n])*Py_d_n_1[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; C_P_NL += d_NL[n]*Div_Grad; } for(n=0;n<N_CP_poles;n++){ C_P_2+=(C_1_cp[n]-1.0)*Py_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; C_P_4+=(C_2_cp[n])*Py_cp_n_1[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; } ey_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=ey_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; ey_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=ey[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; ey[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ey_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-C_E_2*ey_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4-C_P_NL); } void CP_D_ez(int i,int j, int k, comp Curl_H, comp Div_Grad){ int n; comp C_P_1,C_P_2,C_P_3,C_P_4,C_P_NL; C_P_1=C_P_2=C_P_3=C_P_4=C_P_NL=0.0; for(n=0;n<N_drude_poles;n++){ C_P_1+=(d_1_d[n]-1)*Pz_d_n[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; C_P_3+=(d_2_d[n])*Pz_d_n_1[FourDMap(i,j,k,n,N_drude_poles,NCELLZ,NCELLY)]; C_P_NL += d_NL[n]*Div_Grad; } for(n=0;n<N_CP_poles;n++){ C_P_2+=(C_1_cp[n]-1)*Pz_cp_n[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; C_P_4+=(C_2_cp[n])*Pz_cp_n_1[FourDMap(i,j,k,n,N_CP_poles,NCELLZ,NCELLY)]; } ez_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=ez_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; ez_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=ez[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; ez[ThreeDMap(i,j,k,NCELLZ,NCELLY)]=(1/C_E)*(dt*Curl_H/z0+C_E_1*ez_n[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-C_E_2*ez_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-C_P_1-C_P_2-C_P_3-C_P_4-C_P_NL); } // // // __global__ void UPDATE_hx(real *hx,real *ez,real *ey,real *Chxh,real *Chxe,real *psi_Hx_z_N,real *psi_Hx_z_F,real *psi_Hx_y_N,real *psi_Hx_y_F,real *khdy,real // *khdz,real *bh_z_N,real *bh_z_F,real *ch_z_N,real *ch_z_F,real *bh_y_N,real *bh_y_F,real *ch_y_N,real *ch_y_F,int NCELLX,int NCELLY,int NCELLZ,int Periodic_XY,real dx,real dy,real dz,real dt,int cpml_N_Z,int cpml_F_Z,int cpml_N_Y,int cpml_F_Y,int cpml_z_lim,int cpml_y_lim,int cpml_x_lim,int NcpmlZ,int NcpmlY){ // //void UPDATE_hx(void){ // // cudaProfilerStart(); // // int i,j,k,j2,k2; // comp Curl_E; // int idx = blockDim.x * blockIdx.x + threadIdx.x; // // i = idx / (NCELLZ*NCELLY); // j = (idx - i*NCELLZ*NCELLY) / NCELLZ; // k = idx - i*NCELLZ*NCELLY - j*NCELLZ; // // if(Periodic_XY){ // ////#pragma omp parallel for collapse(3) private(i,j,k,Curl_E,j2,k2) // schedule(static) // // for(k=0;k<NCELLZ;k++){ // // for(i=0;i<NCELLX;i++){ // // for(j=0;j<NCELLY;j++){ // // if(i<NCELLX && j<NCELLY && k<NCELLZ){ // // for(k=0;k<NCELLZ-1;k++){ // //if(i==1) printf("%d %d %d\n",i,j,k); // if(j==NCELLY-1){ // #ifdef DOUBLECOMPLEX // Curl_E=(ey[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdz[k]-(ez[ThreeDMapD(i,0,k,NCELLZ,NCELLY)]*cexp(-I*k_y*period_y)-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]; 
// #endif // #ifdef DOUBLEPRECISION // Curl_E=(ey[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdz[k]-(ez[ThreeDMapD(i,0,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]; // #endif // hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chxh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // // } // else{ // Curl_E=(ey[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdz[k]-(ez[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]; // hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chxh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // } // //Z-CPML // if(k<cpml_N_Z && i<cpml_x_lim && j<cpml_y_lim){ // //Near Z-PML // psi_Hx_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]=bh_z_N[k]*psi_Hx_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]+ch_z_N[k]*(ey[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dz; // hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hx_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]; // } // if(k>=cpml_F_Z && j<cpml_y_lim && i<cpml_x_lim){ // //Far Z-PML // k2 = k - cpml_F_Z; // psi_Hx_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]=bh_z_F[k2]*psi_Hx_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]+ch_z_F[k2]*(ey[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dz; // hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hx_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]; // } // } // // } // // } // // } // } // // else{ // // //#pragma omp target device(0) MapD(Chxe[:NCELLX-1][:NCELLY-1][:NCELLZ-1],Chxh[:NCELLX-1][:NCELLY-1][:NCELLZ-1],ez[:NCELLX-1][:NCELLY-1][:NCELLZ-1],ey[:NCELLX-1][:NCELLY-1][:NCELLZ-1],khdy[:NCELLY-1],khdz[:NCELLZ-1],bh_z_N[:NcpmlZ-1],bh_z_F[:NcpmlZ-1],ch_z_N[:NcpmlZ-1],ch_z_F[:NcpmlZ-1],bh_y_N[:NcpmlY-1],bh_y_F[:NcpmlY-1],ch_y_N[:NcpmlY-1],ch_y_F[:NcpmlY-1]) MapD(tofrom:hx[:NCELLX-1][:NCELLY-1][:NCELLZ-1],psi_Hx_z_N[:NCELLX-1][:NCELLY-1][:cpml_N_Z-1],psi_Hx_z_F[:NCELLX-1][:NCELLY-1][:cpml_N_Z-1],psi_Hx_y_N[:NCELLX-1][:cpml_N_Y-1][:NCELLZ-1],psi_Hx_y_F[:NCELLX-1][:cpml_N_Y-1][:NCELLZ-1]) // // { // // //#pragma omp parallel for collapse(3) private(i,j,k,Curl_E,j2,k2) // schedule(static) // // for(i=0;i<NCELLX;i++){ // // for(j=0;j<NCELLY-1;j++){ // // for(k=0;k<NCELLZ-1;k++){ // if(i<NCELLX && j<(NCELLY-1) && k<(NCELLZ-1)){ // // Curl_E=(ey[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdz[k]-(ez[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]; // hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chxh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // //Z-CPML // if(k<cpml_N_Z && i<cpml_x_lim && j<cpml_y_lim){ // //Near Z-PML // psi_Hx_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]=bh_z_N[k]*psi_Hx_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]+ch_z_N[k]*(ey[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dz; // hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hx_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]; // } // if(k>=cpml_F_Z && j<cpml_y_lim && i<cpml_x_lim){ // //Far Z-PML // k2 = k - cpml_F_Z; // psi_Hx_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]=bh_z_F[k2]*psi_Hx_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]+ch_z_F[k2]*(ey[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dz; // 
hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hx_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]; // } // //Y- PML // if(j<cpml_N_Y && i<cpml_x_lim && j<cpml_y_lim){ // psi_Hx_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY)]=bh_y_N[j]*psi_Hx_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY)]+ch_y_N[j]*(ez[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dy; // hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hx_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY)]; // } // if(j>=cpml_F_Y && i<cpml_x_lim && k<cpml_z_lim){ // j2 = j - cpml_F_Y; // psi_Hx_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY)]=bh_y_F[j2]*psi_Hx_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY)]+ch_y_F[j2]*(ez[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dy; // hx[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Chxe[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hx_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY)]; // } // } // // } // // } // // // } // // } // } // // cudaProfilerStop(); // // return; // } // // __global__ void UPDATE_hy(real *hy,real *ez,real *ex,real *Chyh,real *Chye,real *psi_Hy_z_N,real *psi_Hy_z_F,real *psi_Hy_x_N,real *psi_Hy_x_F,real *khdx,real // *khdz,real *bh_z_N,real *bh_z_F,real *ch_z_N,real *ch_z_F,real *bh_x_N,real *bh_x_F,real *ch_x_N,real *ch_x_F,int NCELLX,int NCELLY,int NCELLZ,int Periodic_XY,real dx,real dy,real dz,real dt,int cpml_N_Z,int cpml_F_Z,int cpml_N_X,int cpml_F_X,int cpml_z_lim,int cpml_y_lim,int cpml_x_lim,int NcpmlZ,int NcpmlX){ // // int i,j,k,n,i2,k2; // comp Curl_E; // int idx = blockDim.x * blockIdx.x + threadIdx.x; // // i = idx / (NCELLZ*NCELLY); // j = (idx - i*NCELLZ*NCELLY) / NCELLZ; // k = idx - i*NCELLZ*NCELLY - j*NCELLZ; // if(Periodic_XY){ // ////#pragma omp parallel for collapse(3) private(Curl_E,i,i2,j,k,k2) // schedule(static) // // for(k=0;k<NCELLZ;k++){ // // for(i=0;i<NCELLX;i++){ // // for(j=0;j<NCELLY;j++){ // if(i<NCELLX && j<NCELLY && k<NCELLZ){ // // for(k=0;k<NCELLZ-1;k++){ // if(i==NCELLX-1){ // #ifdef DOUBLECOMPLEX // Curl_E=(ez[ThreeDMapD(0,j,k,NCELLZ,NCELLY)]*cexp(-I*k_x*period_x)-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]-(ex[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdz[k]; // #endif // #ifdef DOUBLEPRECISION // Curl_E=(ez[ThreeDMapD(0,j,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]-(ex[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdz[k]; // #endif // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // } // else{ // Curl_E=(ez[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]-(ex[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdz[k]; // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // } // //Z-PML // if(k<cpml_N_Z && j<cpml_y_lim && k<cpml_z_lim){ // psi_Hy_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]=bh_z_N[k]*psi_Hy_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]+ch_z_N[k]*(ex[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dz; // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hy_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]; // } // if(k>=cpml_F_Z && j<cpml_y_lim && k<cpml_z_lim){ // k2 = k - cpml_F_Z; // 
psi_Hy_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]=bh_z_F[k2]*psi_Hy_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]+ch_z_F[k2]*(ex[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dz; // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hy_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]; // } // } // // } // // } // // } // } // // else{ // ////#pragma omp parallel for collapse(3) private(Curl_E,i,i2,j,k,k2) // schedule(static) // // for(i=0;i<NCELLX-1;i++){ // // for(j=0;j<NCELLY;j++){ // // for(k=0;k<NCELLZ-1;k++){ // if(i<(NCELLX-1) && j<NCELLY && k<(NCELLZ-1)){ // // Curl_E=(ez[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]-(ex[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdz[k]; // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chyh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // // //Z-PML // if(k<cpml_N_Z && j<cpml_y_lim && k<cpml_z_lim){ // psi_Hy_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]=bh_z_N[k]*psi_Hy_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]+ch_z_N[k]*(ex[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dz; // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hy_z_N[ThreeDMapD(i,j,k,NcpmlZ,NCELLY)]; // } // if(k>=cpml_F_Z && j<cpml_y_lim && k<cpml_z_lim){ // k2 = k - cpml_F_Z; // psi_Hy_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]=bh_z_F[k2]*psi_Hy_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]+ch_z_F[k2]*(ex[ThreeDMapD(i,j,k+1,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dz; // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hy_z_F[ThreeDMapD(i,j,k2,NcpmlZ,NCELLY)]; // } // //X-PML // if(i<cpml_N_X && j<cpml_y_lim && k<cpml_z_lim){ // psi_Hy_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=bh_x_N[i]*psi_Hy_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+ch_x_N[i]*(ez[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dx; // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hy_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // if(i>=cpml_F_X && j<cpml_y_lim && k<cpml_z_lim){ // i2 = i - cpml_F_X; // psi_Hy_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]=bh_x_F[i2]*psi_Hy_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]+ch_x_F[i2]*(ez[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ez[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dx; // hy[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Chye[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hy_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]; // } // } // // } // // } // // } // } // return; // } // // //void UPDATE_hz(void){ // __global__ void UPDATE_hz(real *hz,real *ey,real *ex,real *Chzh,real *Chze,real *psi_Hz_x_N,real *psi_Hz_x_F,real *psi_Hz_y_N,real *psi_Hz_y_F,real *khdx,real // *khdy,real *bh_x_N,real *bh_x_F,real *ch_x_N,real *ch_x_F,real *bh_y_N,real *bh_y_F,real *ch_y_N,real *ch_y_F,int NCELLX,int NCELLY,int NCELLZ,int Periodic_XY,real dx,real dy,real dz,real dt,int cpml_N_X,int cpml_F_X,int cpml_N_Y,int cpml_F_Y,int cpml_z_lim,int cpml_y_lim,int cpml_x_lim,int NcpmlY,int NcpmlX){ // // int i,j,k,j2,i2; // comp Curl_E; // int idx = blockDim.x * blockIdx.x + threadIdx.x; // // i = idx / (NCELLZ*NCELLY); // j = (idx - i*NCELLZ*NCELLY) / NCELLZ; // k = idx - i*NCELLZ*NCELLY - j*NCELLZ; // if(Periodic_XY){ // // //#pragma omp parallel for collapse(3) private(Curl_E,i,j,k,j2,i2) // schedule(static) // // for(k=0;k<NCELLZ;k++){ // // for(i=0;i<NCELLX;i++){ // // for(j=0;j<NCELLY;j++){ // if(i<NCELLX && j<NCELLY && k<NCELLZ){ // // for(k=0;k<NCELLZ;k++){ // if(i==NCELLX-1 || 
j== NCELLY-1){ // if(i==NCELLX-1 && j==NCELLY-1){ // //printf("%d,%d,%d\n",i,j,k); // #ifdef DOUBLECOMPLEX // Curl_E=(ex[ThreeDMapD(i,0,k,NCELLZ,NCELLY)]*cexp(-I*k_y*period_y)-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]-(ey[ThreeDMapD(0,j,k,NCELLZ,NCELLY)]*cexp(-I*k_x*period_x)-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]; // #endif // #ifdef DOUBLEPRECISION // Curl_E=(ex[ThreeDMapD(i,0,k,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]-(ey[ThreeDMapD(0,j,k,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]; // #endif // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chzh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // } // else if(i==NCELLX-1){ // #ifdef DOUBLECOMPLEX // Curl_E=(ex[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]-(ey[ThreeDMapD(0,j,k,NCELLZ,NCELLY)]*cexp(-I*k_x*period_x)-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]; // #endif // #ifdef DOUBLEPRECISION // Curl_E=(ex[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]-(ey[ThreeDMapD(0,j,k,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]; // #endif // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chzh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // } // else{ // #ifdef DOUBLECOMPLEX // Curl_E=(ex[ThreeDMapD(i,0,k,NCELLZ,NCELLY)]*cexp(-I*k_y*period_y)-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]-(ey[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]; // #endif // #ifdef DOUBLEPRECISION // Curl_E=(ex[ThreeDMapD(i,0,k,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]-(ey[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]; // #endif // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chzh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // } // } // else{ // Curl_E=(ex[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]-(ey[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]; // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chzh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // } // } // // } // // } // // } // } // // else{ // // //#pragma omp parallel for collapse(3) private(Curl_E,i,j,k,j2,i2) // schedule(static) // // for(i=0;i<NCELLX-1;i++){ // // for(j=0;j<NCELLY-1;j++){ // // for(k=0;k<NCELLZ;k++){ // if(i<(NCELLX-1) && j<(NCELLY-1) && k<NCELLZ){ // // Curl_E=(ex[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdy[j]-(ey[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/khdx[i]; // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=Chzh[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*Curl_E; // //X-PML // if(i<cpml_N_X && j<cpml_y_lim && k<cpml_z_lim){ // psi_Hz_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]=bh_x_N[i]*psi_Hz_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+ch_x_N[i]*(ey[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dx; // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hz_x_N[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]; // } // if(i>=cpml_F_X && j<cpml_y_lim && k<cpml_z_lim){ // i2 = i - cpml_F_X; // 
psi_Hz_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]=bh_x_F[i2]*psi_Hz_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]+ch_x_F[i2]*(ey[ThreeDMapD(i+1,j,k,NCELLZ,NCELLY)]-ey[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dx; // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]-=Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hz_x_F[ThreeDMapD(i2,j,k,NCELLZ,NCELLY)]; // } // //Y-PML // if(j<cpml_N_Y && i<cpml_x_lim && k<cpml_z_lim){ // psi_Hz_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY)]=bh_y_N[j]*psi_Hz_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY)]+ch_y_N[j]*(ex[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dy; // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hz_y_N[ThreeDMapD(i,j,k,NCELLZ,NcpmlY)]; // } // if(j>=cpml_F_Y && i<cpml_x_lim && k<cpml_z_lim){ // j2 = j - cpml_F_Y; // psi_Hz_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY)]=bh_y_F[j2]*psi_Hz_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY)]+ch_y_F[j2]*(ex[ThreeDMapD(i,j+1,k,NCELLZ,NCELLY)]-ex[ThreeDMapD(i,j,k,NCELLZ,NCELLY)])/dy; // hz[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]+=Chze[ThreeDMapD(i,j,k,NCELLZ,NCELLY)]*psi_Hz_y_F[ThreeDMapD(i,j2,k,NCELLZ,NcpmlY)]; // } // } // // } // // } // // } // } // return; // } // void UpdateHydroPx(void){ // int i,j,k; // real Vx1,Vx2,Vx3,Vy1,Vy2,Vy3,Vz1,Vz2,Vz3,Hx1,Hz1,Hy1,Hx2,Hz2,Hy2,Ex1,Ey1,Ez1,VdotGrad,VdotGrad2,DivV,VcrossH,VcrossH2,Pressure,ND1,ND2,ND3,Grad_Div,Grad_Div2; // real INV_DX,INV_DY,INV_DZ; // INV_DX = 1.0/dx; // INV_DY = 1.0/dy; // INV_DZ = 1.0/dz; // Grad_Div = 0.0; // Grad_Div2 =0.0; // ////#pragma omp parallel for collapse(3) // schedule(static) // for(i=0;i<NCELLX-1;i++){ // for(j=1;j<NCELLY-1;j++){ // for(k=1;k<NCELLZ-1;k++){ // if(mat_matrixX[ThreeDMap(i,j,k,NCELLZ,NCELLY)]> first_medium && mat_matrixX[ThreeDMap(i,j,k,NCELLZ,NCELLY)] < 6){ // ND1 = N_EQ + NDx_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; // // Grad_Div = Calc_DIV_GRADx(i,j,k); // Grad_Div2 = Calc_DIV_GRADx2(i,j,k); // // // Vx1 = 0.5 * (Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // Vx2 = 0.5 * (Px_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vy1 = 0.25 * (Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i+1,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]); // Vy2 = 0.25 * (Py_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n_1[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n_1[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n_1[FourDMap(i+1,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]); // //Vy1 = 0.5 * (Vy1 + Vy2); // Vz1 = 0.25 * (Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); // Vz2 = 0.25 * (Pz_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n_1[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n_1[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n_1[FourDMap(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); // //Vz1 = 0.5 * (Vz1 + Vz2); // Hy2 = 0.5 * (hyPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hyPrev[ThreeDMap(i,j,k-1,NCELLZ,NCELLY)]); // Hz2 = 0.5 * (hzPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hzPrev[ThreeDMap(i,j-1,k,NCELLZ,NCELLY)]); // Hy1 = 0.5 * (hy[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hy[ThreeDMap(i,j,k-1,NCELLZ,NCELLY)]); // Hz1 = 0.5 * 
(hz[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hz[ThreeDMap(i,j-1,k,NCELLZ,NCELLY)]); // // if(WithConvection) { // VdotGrad = 0.5*(Vx1*(Px_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DX + Vy1*(Px_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - Px_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DY + Vz1*(Px_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - Px_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DZ); // VdotGrad2 = 0.5*(Vx2*(Px_d_n_1[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - Px_d_n_1[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DX + Vy2*(Px_d_n_1[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - Px_d_n_1[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DY + Vz2*(Px_d_n_1[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - Px_d_n_1[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DZ); // VdotGrad = (VdotGrad - VdotGrad2)/dt; // } // else VdotGrad = 0.0; // // if(WithMagField){ // VcrossH = Vy1*Hz1 - Vz1*Hy1; // VcrossH2 = Vy2*Hz2 - Vz2*Hy2; // VcrossH = (VcrossH - VcrossH2)/dt; // } // else VcrossH = 0.0; // // Px_d[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] = d_1_d[0]*Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + d_2_d[0]*Px_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] +d_3_d[0]*ex[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + d_4_d[0]*ex_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + d_NL[0]*(Grad_Div + Grad_Div2/N_EQ)/pow(ND1,1.0/3.0) + d_5_d[0]*(VdotGrad + VcrossH); // // } // // } // } // } // } // // void UpdateHydroPy(void){ // int i,j,k; // real Vx1,Vx2,Vx3,Vy1,Vy2,Vy3,Vz1,Vz2,Vz3,Hx1,Hz1,Hy1,Hx2,Hz2,Hy2,Ex1,Ey1,Ez1,VdotGrad,VdotGrad2,DivV,VcrossH,VcrossH2,Pressure,ND1,ND2,ND3,Grad_Div,Grad_Div2; // real INV_DX,INV_DY,INV_DZ; // INV_DX = 1.0/dx; // INV_DY = 1.0/dy; // INV_DZ = 1.0/dz; // Grad_Div = 0.0; // Grad_Div2 =0.0; // ////#pragma omp parallel for collapse(3) // schedule(static) // for(i=1;i<NCELLX-1;i++){ // for(j=0;j<NCELLY-1;j++){ // for(k=1;k<NCELLZ-1;k++){ // if(mat_matrixY[ThreeDMap(i,j,k,NCELLZ,NCELLY)]> first_medium && mat_matrixY[ThreeDMap(i,j,k,NCELLZ,NCELLY)] < 6){ // // Grad_Div = Calc_DIV_GRADy(i,j,k); // Grad_Div2 = Calc_DIV_GRADy2(i,j,k); // // ND1 = N_EQ + NDy_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; // // Vy1 = 0.5 * (Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // Vy2 = 0.5 * (Py_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vx1 = 0.25 * (Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]); // Vx2 = 0.25 * (Px_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n_1[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n_1[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n_1[FourDMap(i-1,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vx1 = 0.5 * (Vx1 + Vx2); // Vz1 = 0.25 * (Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); // Vz2 = 0.25 * (Pz_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n_1[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n_1[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)] + 
Pz_d_n_1[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); // //Vz1 = 0.5 * (Vz1 + Vz2); // Hx1 = 0.5 * (hx[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hx[ThreeDMap(i,j,k-1,NCELLZ,NCELLY)]); // Hz1 = 0.5 * (hz[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hz[ThreeDMap(i-1,j,k,NCELLZ,NCELLY)]); // Hx2 = 0.5 * (hxPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hxPrev[ThreeDMap(i,j,k-1,NCELLZ,NCELLY)]); // Hz2 = 0.5 * (hzPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hzPrev[ThreeDMap(i-1,j,k,NCELLZ,NCELLY)]); // // // if(WithMagField){ // VcrossH = Vz1*Hx1 - Vx1*Hz1; // VcrossH2 = Vz2*Hx2 - Vx2*Hz2; // VcrossH = (VcrossH - VcrossH2)/dt; // } // else VcrossH = 0.0; // if(WithConvection) { // VdotGrad = 0.5*(Vx1*(Py_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - Py_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DX + Vy1*(Py_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DY + Vz1*(Py_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - Py_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DZ); // VdotGrad2 = 0.5*(Vx2*(Py_d_n_1[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - Py_d_n_1[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DX + Vy2*(Py_d_n_1[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - Py_d_n_1[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DY + Vz2*(Py_d_n_1[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - Py_d_n_1[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DZ); // VdotGrad = (VdotGrad - VdotGrad2)/dt; // } // else VdotGrad = 0.0; // // Py_d[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] = d_1_d[0]*Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + d_2_d[0]*Py_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + d_3_d[0]*ey[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + d_4_d[0]*ey_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + d_NL[0]*(Grad_Div + Grad_Div2/N_EQ)/pow(ND1,1.0/3.0) + d_5_d[0]*(VdotGrad + VcrossH); // // } // // // } // } // } // } // // void UpdateHydroPz(void){ // int i,j,k; // real Vx1,Vx2,Vx3,Vy1,Vy2,Vy3,Vz1,Vz2,Vz3,Hx1,Hz1,Hy1,Hx2,Hz2,Hy2,Ex1,Ey1,Ez1,VdotGrad,VdotGrad2,DivV,VcrossH,VcrossH2,Pressure,ND1,ND2,ND3,Grad_Div,Grad_Div2; // real INV_DX,INV_DY,INV_DZ; // INV_DX = 1.0/dx; // INV_DY = 1.0/dy; // INV_DZ = 1.0/dz; // Grad_Div = 0.0; // Grad_Div2 =0.0; // ////#pragma omp parallel for collapse(3) // schedule(static) // for(i=1;i<NCELLX-1;i++){ // for(j=1;j<NCELLY-1;j++){ // for(k=0;k<NCELLZ-1;k++){ // if(mat_matrixZ[ThreeDMap(i,j,k,NCELLZ,NCELLY)]> first_medium && mat_matrixZ[ThreeDMap(i,j,k,NCELLZ,NCELLY)] < 6){ // // Grad_Div = Calc_DIV_GRADz(i,j,k); // Grad_Div2 = Calc_DIV_GRADz2(i,j,k); // // ND1 = N_EQ + NDz_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]; // // Vz1 = 0.5 * (Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // Vz2 = 0.5 * (Pz_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vx1 = 0.25 * (Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // Vx2 = 0.25 * (Px_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n_1[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n_1[FourDMap(i-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n_1[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // //Vx1 = 0.5 * (Vx1 + Vx2); // Vy1 = 0.25 * 
(Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]); // Vy2 = 0.25 * (Py_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n_1[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n_1[FourDMap(i,j-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n_1[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]); // // Vy1 = 0.5 * (Vy1 + Vy2); // Hy1 = 0.5 * (hy[ThreeDMap(i-1,j,k,NCELLZ,NCELLY)] + hy[ThreeDMap(i,j,k,NCELLZ,NCELLY)]); // Hx1 = 0.5 * (hx[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hx[ThreeDMap(i,j-1,k,NCELLZ,NCELLY)]); // Hy2 = 0.5 * (hyPrev[ThreeDMap(i-1,j,k,NCELLZ,NCELLY)] + hyPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]); // Hx2 = 0.5 * (hxPrev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + hxPrev[ThreeDMap(i,j-1,k,NCELLZ,NCELLY)]); // // if(WithMagField){ // VcrossH = Vx1*Hy1 - Vy1*Hx1; // VcrossH2 = Vx2*Hy2 - Vy2*Hx2; // VcrossH = (VcrossH - VcrossH2)/dt; // } // else VcrossH = 0.0; // // if(WithConvection){ // VdotGrad = 0.5*(Vx1*(Pz_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - Pz_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DX + Vy1*(Pz_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - Pz_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DY + Vz1*(Pz_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DZ); // VdotGrad2 = 0.5*(Vx2*(Pz_d_n_1[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - Pz_d_n_1[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DX + Vy2*(Pz_d_n_1[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - Pz_d_n_1[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DY + Vz2*(Pz_d_n_1[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - Pz_d_n_1[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)])*INV_DZ); // VdotGrad = (VdotGrad - VdotGrad2)/dt; // } // else VdotGrad = 0.0; // // Pz_d[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] = d_1_d[0]*Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + d_2_d[0]*Pz_d_n_1[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + d_3_d[0]*ez[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + d_4_d[0]*ez_n_1[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + d_NL[0]*(Grad_Div + Grad_Div2/N_EQ)/pow(ND1,1.0/3.0) + d_5_d[0]*(VdotGrad + VcrossH); // // } // // // } // } // } // } // // // comp Calc_DIV_GRADx2(int i,int j, int k){ comp Div_Grad; real INV_DX = 1.0/dx; real INV_DY = 1.0/dy; real INV_DZ = 1.0/dz; int n; Div_Grad = INV_DX*INV_DX*(Px_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i+1,j,k,NCELLZ,NCELLY)] - 2.0*Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i-1,j,k,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Py_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i+1,j,k,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i+1,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i+1,j-1,k,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j-1,k,NCELLZ,NCELLY)]) + 
INV_DX*INV_DZ*(Pz_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i+1,j,k,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i+1,j,k-1,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j,k-1,NCELLZ,NCELLY)]); } comp Calc_DIV_GRADx(int i,int j,int k){ comp Div_Grad; real INV_DX = 1.0/dx; real INV_DY = 1.0/dy; real INV_DZ = 1.0/dz; int n; for(n=0;n<N_drude_poles;n++){ if(Diverge_Gradient){ if(i==0 && j==0){ Div_Grad = INV_DX*INV_DX*(Px_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(NCELLX-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Py_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i+1,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Pz_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } else if(i==NCELLX-1 && j==0){ Div_Grad = INV_DX*INV_DX*(Px_d_n[FourDMap(0,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Py_d_n[FourDMap(0,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(0,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Pz_d_n[FourDMap(0,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(0,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } else if(i==NCELLX-1){ Div_Grad = INV_DX*INV_DX*(Px_d_n[FourDMap(0,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Py_d_n[FourDMap(0,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(0,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Pz_d_n[FourDMap(0,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(0,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } else if(i==0){ Div_Grad = INV_DX*INV_DX*(Px_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(NCELLX-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Py_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i+1,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Pz_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } else if(j==0){ Div_Grad = INV_DX*INV_DX*(Px_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - 
2.0*Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Py_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i+1,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Pz_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } else{ Div_Grad = INV_DX*INV_DX*(Px_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Py_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i+1,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Pz_d_n[FourDMap(i+1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } } } return Div_Grad; } comp Calc_DIV_GRADy2(int i,int j,int k){ comp Div_Grad; real INV_DX = 1.0/dx; real INV_DY = 1.0/dy; real INV_DZ = 1.0/dz; int n; Div_Grad = INV_DY*INV_DY*(Py_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j+1,k,NCELLZ,NCELLY)] - 2.0*Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j-1,k,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Px_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i,j+1,k,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i-1,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i-1,j+1,k,NCELLZ,NCELLY)]+Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i-1,j,k,NCELLZ,NCELLY)]) + INV_DY*INV_DZ*(Pz_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j+1,k,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j+1,k-1,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j,k-1,NCELLZ,NCELLY)]); return Div_Grad; } comp Calc_DIV_GRADy(int i,int j,int k){ comp Div_Grad; real INV_DX = 1.0/dx; real INV_DY = 1.0/dy; real INV_DZ = 1.0/dz; int n; for(n=0;n<N_drude_poles;n++){ if(Diverge_Gradient){ if(i==0 && j==0){ Div_Grad = INV_DY*INV_DY*(Py_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Px_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(NCELLX-1,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(NCELLX-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DY*INV_DZ*(Pz_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } else if(i==0 && 
j==NCELLY-1){ Div_Grad = INV_DY*INV_DY*(Py_d_n[FourDMap(i,0,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Px_d_n[FourDMap(i,0,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(NCELLX-1,0,k,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(NCELLX-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DY*INV_DZ*(Pz_d_n[FourDMap(i,0,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,0,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } else if(j==NCELLY-1){ Div_Grad = INV_DY*INV_DY*(Py_d_n[FourDMap(i,0,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Px_d_n[FourDMap(i,0,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i-1,0,k,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DY*INV_DZ*(Pz_d_n[FourDMap(i,0,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,0,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } else if(j==0){ Div_Grad = INV_DY*INV_DY*(Py_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Px_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i-1,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DY*INV_DZ*(Pz_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } else if(i==0){ Div_Grad = INV_DY*INV_DY*(Py_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Px_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(NCELLX-1,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(NCELLX-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DY*INV_DZ*(Pz_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } else{ Div_Grad = INV_DY*INV_DY*(Py_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DY*(Px_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i-1,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DY*INV_DZ*(Pz_d_n[FourDMap(i,j+1,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Pz_d_n[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]); } } } //printf("%e\n",Div_Grad); return 
Div_Grad; } comp Calc_DIV_GRADz2(int i,int j,int k){ comp Div_Grad; real INV_DX = 1.0/dx; real INV_DY = 1.0/dy; real INV_DZ = 1.0/dz; int n; Div_Grad = INV_DZ*INV_DZ*(Pz_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j,k+1,NCELLZ,NCELLY)] - 2.0*Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]*NDz_prev[ThreeDMap(i,j,k-1,NCELLZ,NCELLY)]) + INV_DZ*INV_DY*(Py_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j,k+1,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j-1,k+1,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDy_prev[ThreeDMap(i,j-1,k,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Px_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i,j,k+1,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i,j,k,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i-1,j,k+1,NCELLZ,NCELLY)]+Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]*NDx_prev[ThreeDMap(i-1,j,k,NCELLZ,NCELLY)]); return Div_Grad; } comp Calc_DIV_GRADz(int i,int j,int k){ comp Div_Grad; real INV_DX = 1.0/dx; real INV_DY = 1.0/dy; real INV_DZ = 1.0/dz; int n; for(n=0;n<N_drude_poles;n++){ if(Diverge_Gradient){ if(i==0 && j==0){ Div_Grad = INV_DZ*INV_DZ*(Pz_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DZ*INV_DY*(Py_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,NCELLY-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Px_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(NCELLX-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(NCELLX-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); } else if(i==0){ Div_Grad = INV_DZ*INV_DZ*(Pz_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DZ*INV_DY*(Py_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Px_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(NCELLX-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(NCELLX-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); } else if(j==0){ Div_Grad = INV_DZ*INV_DZ*(Pz_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DZ*INV_DY*(Py_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,NCELLY-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,NCELLY-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + 
INV_DX*INV_DZ*(Px_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); } else{ Div_Grad = INV_DZ*INV_DZ*(Pz_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DZ*INV_DY*(Py_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,j-1,k,0,N_drude_poles,NCELLZ,NCELLY)]) + INV_DX*INV_DZ*(Px_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[FourDMap(i-1,j,k,0,N_drude_poles,NCELLZ,NCELLY)]); // if(t==173 && k==102){ // printf("%e\t%e\t%e\n",Pz_d_n[FourDMap(i,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)] - 2.0*Pz_d_n[FourDMap(i,j,k,0,N_drude_poles,NCELLZ,NCELLY)] + Pz_d_n[FourDMap(i,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)],Py_d_n[i][j+1][k+1][0]-Py_d_n[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]-Py_d_n[FourDMap(i,j-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[i][j-1][k-1][0],Px_d_n[i+1][j][k+1][0]-Px_d_n[FourDMap(i+1,j,k-1,0,N_drude_poles,NCELLZ,NCELLY)]-Px_d_n[FourDMap(i-1,j,k+1,0,N_drude_poles,NCELLZ,NCELLY)]+Px_d_n[i-1][j][k-1][0]); // printf("%e\n",Py_d_n[i][j+1][k+1][0]-(Py_d_n[FourDMap(i,j+1,k-1,0,N_drude_poles,NCELLZ,NCELLY)]+Py_d_n[FourDMap(i,j-1,k+1,0,N_drude_poles,NCELLZ,NCELLY)])+Py_d_n[i][j-1][k-1][0]); // // } //1.186946e-66 } // printf("%e\n",Div_Grad); } } return Div_Grad; }
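The live CP_D_ex/CP_D_ey/CP_D_ez helpers above all implement the same auxiliary-differential-equation field update; restated compactly for readability (coefficient names follow the code, where C_E, C_E_1, C_E_2, the d_*_d and C_*_cp pole coefficients and d_NL are defined elsewhere in the original source, and Div_Grad is the nonlinear divergence-gradient term supplied by the Calc_DIV_GRAD* helpers):

\[
e^{\,n+1} = \frac{1}{C_E}\Big[\frac{\Delta t}{z_0}\,(\nabla\times\mathbf{H})
  + C_{E,1}\,e^{\,n} - C_{E,2}\,e^{\,n-1}
  - \sum_{p}\big((d_{1,p}-1)\,P_{d,p}^{\,n} + d_{2,p}\,P_{d,p}^{\,n-1} + d_{NL,p}\,\mathrm{Div\_Grad}\big)
  - \sum_{q}\big((C_{1,q}-1)\,P_{cp,q}^{\,n} + C_{2,q}\,P_{cp,q}^{\,n-1}\big)\Big]
\]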
1ab224e26d831144f8f8c48e0c07a3e1f5a7e479.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

// Copies WHAT[idx] into WHERE[idx]; one thread per element, no bounds check,
// so the launch configuration must cover exactly the array length.
__global__ void Replace(float *WHAT, float *WHERE)
{
    int idx = threadIdx.x + blockIdx.x * blockDim.x;
    WHERE[idx] = WHAT[idx];
}
1ab224e26d831144f8f8c48e0c07a3e1f5a7e479.cu
#include "includes.h" __global__ void Replace(float *WHAT , float *WHERE) { int idx = threadIdx.x + blockIdx.x*blockDim.x; WHERE[idx] = WHAT[idx]; }
10c7b2657c80565777d34a4127c168b3b2a07a3f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifndef _ORDERGRAPH_KERNEL_H_ #define _ORDERGRAPH_KERNEL_H_ #include <stdio.h> #include "data45.cu" ; char name[20] = "45.out"; __device__ void Dincr(int *bit,int n); __device__ void DincrS(int *bit,int n); __device__ bool D_getState(int parN,int *sta,int time); __device__ void D_findComb(int* comb, int l, int n); __device__ int D_findindex(int *arr, int size); __device__ int D_C(int n, int a); __global__ void genScoreKernel(int sizepernode, float *D_localscore,int *D_data, float *D_LG){ int id=blockIdx.x*256+threadIdx.x; int node,index; bool flag; int parent[5]={0}; int pre[NODE_N]={0}; int state[5]={0}; int i,j,parN=0,tmp,t; int t1=0,t2=0; float ls=0; int Nij[STATE_N]={0}; if(id<sizepernode){ D_findComb(parent,id,NODE_N-1); for(i=0;i<4;i++) { if(parent[i]>0) parN++; } for(node=0;node<NODE_N;node++){ j=1; for(i=0;i<NODE_N;i++) { if(i!=node)pre[j++]=i; } for(tmp=0;tmp<parN;tmp++) state[tmp]=0; index=sizepernode*node+id; //priors /* for(tmp=1;tmp<=4;tmp++){ localscore[index]+=100*(prior[node][pre[parent[tmp]]]-0.5)*(prior[node][pre[parent[tmp]]]-0.5)*(prior[node][pre[parent[tmp]]]-0.5); } */ t=0; while(D_getState(parN,state,t++)){ // for get state //printf("test %u\n",id); ls=0; for(tmp=0;tmp<STATE_N;tmp++) Nij[tmp]=0; for(t1=0;t1<DATA_N;t1++){ flag=true; for(t2=0;t2<parN;t2++){ if(D_data[t1*NODE_N+pre[parent[t2]]]!=state[t2]) { flag=false; break; } } if(!flag) continue; Nij[D_data[t1*NODE_N+node]]++; } tmp=STATE_N-1; for(t1=0;t1<STATE_N;t1++){ ls+=D_LG[Nij[t1]]; tmp+=Nij[t1]; } ls-=D_LG[tmp]; ls+=D_LG[STATE_N-1]; D_localscore[index]+=ls; } } } } __global__ void computeKernel(int taskperthr,int sizepernode, float *D_localscore, bool *D_parent, int node, int total, float *D_Score, int *D_resP) { extern __shared__ float lsinblock[]; const unsigned int id = blockIdx.x*256 + threadIdx.x; const unsigned int tid = threadIdx.x; const unsigned int bid = blockIdx.x; int posN=1,i,index,t,tmp; int pre[NODE_N]={0}; int parN=0; int bestparent[4]={0},parent[5]={-1}; float bestls=-999999999999999,ls; for(i=0;i<NODE_N;i++){ if(D_parent[i]==1){pre[posN++]=i;} } for(i=0;i<taskperthr&&((id*taskperthr+i)<total);i++){ D_findComb(parent,id*taskperthr+i,posN); for(parN=0;parN<4;parN++){ if(parent[parN]<0) break; if(pre[parent[parN]]>node) parent[parN]=pre[parent[parN]]; else parent[parN]=pre[parent[parN]]+1; } for(tmp=parN;tmp>0;tmp--){ parent[tmp]=parent[tmp-1]; } parent[0]=0; index=D_findindex(parent,parN); index+=sizepernode*node; ls=D_localscore[index]; if(ls>bestls){ bestls=ls; for(tmp=0;tmp<4;tmp++) bestparent[tmp]=parent[tmp+1]; } } lsinblock[tid]=bestls; __syncthreads(); for(i=128;i>=1;i/=2){ if(tid<i){ if(lsinblock[tid+i]>lsinblock[tid]&&lsinblock[tid+i]<0){ lsinblock[tid]=lsinblock[tid+i]; lsinblock[tid+i]=(float)(tid+i); } else if(lsinblock[tid+i]<lsinblock[tid]&&lsinblock[tid]<0){ lsinblock[tid+i]=(float)tid; } else if(lsinblock[tid]>0&&lsinblock[tid+i]<0){ lsinblock[tid]=lsinblock[tid+i]; lsinblock[tid+i]=(float)(tid+i); } else if(lsinblock[tid]<0&&lsinblock[tid+i]>0){ lsinblock[tid+i]=(float)tid; } } __syncthreads(); } __syncthreads(); if(tid==0){ D_Score[bid]=lsinblock[0]; t=0; for(i=0;i<7&&t<128&&t>=0;i++){ t=(int)lsinblock[(int)powf(2.0,i)+t]; } lsinblock[0]=(float)t; } __syncthreads(); if(tid==(int)lsinblock[0]){ for(i=0;i<4;i++){ D_resP[bid*4+i]=bestparent[i]; } } } __device__ void Dincr(int *bit,int n){ while(n<=NODE_N){ bit[n]++; if(bit[n]>=2) { bit[n]=0; n++; } else{ break; } } return; } __device__ void 
DincrS(int *bit,int n){ bit[n]++; if(bit[n]>=STATE_N) { bit[n]=0; Dincr(bit,n+1); } return; } __device__ bool D_getState(int parN,int *sta,int time){ int i,j=1; for(i=0;i<parN;i++){ j*=STATE_N; } j--; if(time>j) return false; if(time>=1) DincrS(sta,0); return true; } __device__ void D_findComb(int* comb, int l, int n) { const int len = 4; if (l == 0) { for (int i = 0; i < len; i++) comb[i] = -1; return; } int sum = 0; int k = 1; while (sum < l) sum += D_C(n,k++); l -= sum - D_C(n,--k); int low = 0; int pos = 0; while (k > 1) { sum = 0; int s = 1; while (sum < l) sum += D_C(n-s++,k-1); l -= sum - D_C(n-(--s),--k); low += s; comb[pos++] = low; n -= s; } comb[pos] = low + l; for (int i = pos+1; i < 4; i++) comb[i] = -1; } __device__ int D_findindex(int *arr, int size){ //reminder: arr[0] has to be 0 && size == array size-1 && index start from 0 int i,j,index=0; for(i=1;i<size;i++){ index+=D_C(NODE_N-1,i); } for(i=1;i<=size-1;i++){ for(j=arr[i-1]+1;j<=arr[i]-1;j++){ index+=D_C(NODE_N-1-j,size-i); } } index+=arr[size]-arr[size-1]; return index; } __device__ int D_C(int n, int a){ int i,res=1,atmp=a; for(i=0;i<atmp;i++){ res*=n; n--; } for(i=0;i<atmp;i++){ res/=a; a--; } return res; } #endif
10c7b2657c80565777d34a4127c168b3b2a07a3f.cu
#ifndef _ORDERGRAPH_KERNEL_H_ #define _ORDERGRAPH_KERNEL_H_ #include <stdio.h> #include "data45.cu" ; char name[20] = "45.out"; __device__ void Dincr(int *bit,int n); __device__ void DincrS(int *bit,int n); __device__ bool D_getState(int parN,int *sta,int time); __device__ void D_findComb(int* comb, int l, int n); __device__ int D_findindex(int *arr, int size); __device__ int D_C(int n, int a); __global__ void genScoreKernel(int sizepernode, float *D_localscore,int *D_data, float *D_LG){ int id=blockIdx.x*256+threadIdx.x; int node,index; bool flag; int parent[5]={0}; int pre[NODE_N]={0}; int state[5]={0}; int i,j,parN=0,tmp,t; int t1=0,t2=0; float ls=0; int Nij[STATE_N]={0}; if(id<sizepernode){ D_findComb(parent,id,NODE_N-1); for(i=0;i<4;i++) { if(parent[i]>0) parN++; } for(node=0;node<NODE_N;node++){ j=1; for(i=0;i<NODE_N;i++) { if(i!=node)pre[j++]=i; } for(tmp=0;tmp<parN;tmp++) state[tmp]=0; index=sizepernode*node+id; //priors /* for(tmp=1;tmp<=4;tmp++){ localscore[index]+=100*(prior[node][pre[parent[tmp]]]-0.5)*(prior[node][pre[parent[tmp]]]-0.5)*(prior[node][pre[parent[tmp]]]-0.5); } */ t=0; while(D_getState(parN,state,t++)){ // for get state //printf("test %u\n",id); ls=0; for(tmp=0;tmp<STATE_N;tmp++) Nij[tmp]=0; for(t1=0;t1<DATA_N;t1++){ flag=true; for(t2=0;t2<parN;t2++){ if(D_data[t1*NODE_N+pre[parent[t2]]]!=state[t2]) { flag=false; break; } } if(!flag) continue; Nij[D_data[t1*NODE_N+node]]++; } tmp=STATE_N-1; for(t1=0;t1<STATE_N;t1++){ ls+=D_LG[Nij[t1]]; tmp+=Nij[t1]; } ls-=D_LG[tmp]; ls+=D_LG[STATE_N-1]; D_localscore[index]+=ls; } } } } __global__ void computeKernel(int taskperthr,int sizepernode, float *D_localscore, bool *D_parent, int node, int total, float *D_Score, int *D_resP) { extern __shared__ float lsinblock[]; const unsigned int id = blockIdx.x*256 + threadIdx.x; const unsigned int tid = threadIdx.x; const unsigned int bid = blockIdx.x; int posN=1,i,index,t,tmp; int pre[NODE_N]={0}; int parN=0; int bestparent[4]={0},parent[5]={-1}; float bestls=-999999999999999,ls; for(i=0;i<NODE_N;i++){ if(D_parent[i]==1){pre[posN++]=i;} } for(i=0;i<taskperthr&&((id*taskperthr+i)<total);i++){ D_findComb(parent,id*taskperthr+i,posN); for(parN=0;parN<4;parN++){ if(parent[parN]<0) break; if(pre[parent[parN]]>node) parent[parN]=pre[parent[parN]]; else parent[parN]=pre[parent[parN]]+1; } for(tmp=parN;tmp>0;tmp--){ parent[tmp]=parent[tmp-1]; } parent[0]=0; index=D_findindex(parent,parN); index+=sizepernode*node; ls=D_localscore[index]; if(ls>bestls){ bestls=ls; for(tmp=0;tmp<4;tmp++) bestparent[tmp]=parent[tmp+1]; } } lsinblock[tid]=bestls; __syncthreads(); for(i=128;i>=1;i/=2){ if(tid<i){ if(lsinblock[tid+i]>lsinblock[tid]&&lsinblock[tid+i]<0){ lsinblock[tid]=lsinblock[tid+i]; lsinblock[tid+i]=(float)(tid+i); } else if(lsinblock[tid+i]<lsinblock[tid]&&lsinblock[tid]<0){ lsinblock[tid+i]=(float)tid; } else if(lsinblock[tid]>0&&lsinblock[tid+i]<0){ lsinblock[tid]=lsinblock[tid+i]; lsinblock[tid+i]=(float)(tid+i); } else if(lsinblock[tid]<0&&lsinblock[tid+i]>0){ lsinblock[tid+i]=(float)tid; } } __syncthreads(); } __syncthreads(); if(tid==0){ D_Score[bid]=lsinblock[0]; t=0; for(i=0;i<7&&t<128&&t>=0;i++){ t=(int)lsinblock[(int)powf(2.0,i)+t]; } lsinblock[0]=(float)t; } __syncthreads(); if(tid==(int)lsinblock[0]){ for(i=0;i<4;i++){ D_resP[bid*4+i]=bestparent[i]; } } } __device__ void Dincr(int *bit,int n){ while(n<=NODE_N){ bit[n]++; if(bit[n]>=2) { bit[n]=0; n++; } else{ break; } } return; } __device__ void DincrS(int *bit,int n){ bit[n]++; if(bit[n]>=STATE_N) { bit[n]=0; Dincr(bit,n+1); } 
return; } __device__ bool D_getState(int parN,int *sta,int time){ int i,j=1; for(i=0;i<parN;i++){ j*=STATE_N; } j--; if(time>j) return false; if(time>=1) DincrS(sta,0); return true; } __device__ void D_findComb(int* comb, int l, int n) { const int len = 4; if (l == 0) { for (int i = 0; i < len; i++) comb[i] = -1; return; } int sum = 0; int k = 1; while (sum < l) sum += D_C(n,k++); l -= sum - D_C(n,--k); int low = 0; int pos = 0; while (k > 1) { sum = 0; int s = 1; while (sum < l) sum += D_C(n-s++,k-1); l -= sum - D_C(n-(--s),--k); low += s; comb[pos++] = low; n -= s; } comb[pos] = low + l; for (int i = pos+1; i < 4; i++) comb[i] = -1; } __device__ int D_findindex(int *arr, int size){ //reminder: arr[0] has to be 0 && size == array size-1 && index start from 0 int i,j,index=0; for(i=1;i<size;i++){ index+=D_C(NODE_N-1,i); } for(i=1;i<=size-1;i++){ for(j=arr[i-1]+1;j<=arr[i]-1;j++){ index+=D_C(NODE_N-1-j,size-i); } } index+=arr[size]-arr[size-1]; return index; } __device__ int D_C(int n, int a){ int i,res=1,atmp=a; for(i=0;i<atmp;i++){ res*=n; n--; } for(i=0;i<atmp;i++){ res/=a; a--; } return res; } #endif
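One detail worth noting when reading computeKernel above: it declares extern __shared__ float lsinblock[] and its reduction touches one float per thread, so a launch must pass a dynamic shared-memory size of blockDim.x * sizeof(float), and the reduction logic assumes exactly 256 threads per block (both kernels index with blockIdx.x*256 + threadIdx.x). A hedged host-side sketch, assuming it is compiled together with the file above; the wrapper name and the grid-size formula are illustrative assumptions.

// Hypothetical launch helper, not part of the original file.
void launchComputeKernel(int taskperthr, int sizepernode, float *D_localscore,
                         bool *D_parent, int node, int total,
                         float *D_Score, int *D_resP)
{
    const int threads = 256;                                            // required by the in-block reduction
    const int blocks  = (total + taskperthr * threads - 1) / (taskperthr * threads);
    const size_t shmem = threads * sizeof(float);                       // dynamic shared memory for lsinblock[]
    computeKernel<<<blocks, threads, shmem>>>(taskperthr, sizepernode, D_localscore,
                                              D_parent, node, total, D_Score, D_resP);
}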
f80fa5beadbdb34096362b1db8dd079cc46e528f.hip
// !!! This is a file automatically generated by hipify!!! // cuda_functions.cu #ifndef CUDA_FUNCTIONS_ALLOCATOR_H #define CUDA_FUNCTIONS_ALLOCATOR_H #include <cstdlib> #include <cstring> #include <new> #include <hip/hip_runtime.h> #include <algorithm> #include <cuda_occupancy.h> #include <Handle.cpp> #include <cstring> #include <cmath> #include <type_traits> #include <thrust/iterator/reverse_iterator.h> #include <sys/unistd.h> #include "exceptions.cpp" #include <iostream> namespace Flamingo{ namespace Memory{ template <typename pointer, typename Item> __global__ void cuda_fill(pointer dst, int count, Item item) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i = idx; i < count; i += gridDim.x * blockDim.x) { *(dst + i) = item; } } enum IndexType { OVERLAP, BLOCK }; template <IndexType E> __device__ __host__ int getSourceIndex(int tid, int block, int offset) { return 0; } template <> __device__ __host__ int getSourceIndex<OVERLAP>(int tid, int block, int offset) { return block * ((tid) / offset) + (tid % offset) + block; } template <> __device__ __host__ int getSourceIndex<BLOCK>(int tid, int block, int offset) { return block * ((tid) / (block - offset)) + (tid % (block - offset)) + offset; } template <typename T, typename U> __global__ void cuda_overlapextract(T src, U* tmp, int block, int off, int count) { int bid = blockIdx.x * blockDim.x; int tid = bid + threadIdx.x; //negative just reverse iterations //tmp_index int src_index = getSourceIndex<OVERLAP>(tid, block, off); int tmp_index = tid; int stride_src = blockDim.x * block; int stride_dst = blockDim.x * off; int i = 0; while (src_index < count) { tmp[tmp_index] = src[src_index]; i++; tmp_index += stride_dst * i; src_index += stride_src * i; }; }; template <typename T, typename L> __global__ void cuda_blockmove(T dst, T src, int block, int off, int count) { int bid = blockIdx.x * blockDim.x; int tid = bid + threadIdx.x; extern __shared__ char share[]; L* share_ptr=(L*)(&share); int src_index; int shared_index; int dst_index; //negatice just revese iterations src_index = getSourceIndex<BLOCK>(tid, block, off); shared_index = tid; dst_index = src_index - off; int stride = blockDim.x + off; L tmp; int i = 0; int dst_or = dst_index; int shared_or = shared_index; while (src_index < count) { tmp = src[src_index]; share_ptr[shared_index] = tmp; i++; src_index += stride; shared_index += blockDim.x; }; __syncthreads(); i = 0; dst_index = dst_or; shared_index = shared_or; while (dst_index < count) { tmp = share_ptr[shared_index]; dst[dst_index] = tmp; i--; shared_index += blockDim.x; dst_index += stride; }; }; template <typename T, typename U> __global__ void cuda_overlapinsert(T dst, U* tmp, int block, int off, int count) { int bid = blockIdx.x * blockDim.x; int tid = bid + threadIdx.x; //negative just revesre iteration int src_index = tid; int dst_index = getSourceIndex<OVERLAP>(tid, block, off) - off; int stride = blockDim.x * off; int i = 0; while (dst_index + off < count) { dst[dst_index] = tmp[src_index]; i++; src_index += stride * i; dst_index += stride * i; } }; template <typename pointer, typename value_type> void cuda_memmove(pointer src_ptr, pointer dst_ptr, value_type* tmp, int groupsize, int offset, int totalsize, int* mingridsize, int* blocksize) { gpuErrorCheck(hipGetLastError() ); cuda_overlapextract<pointer, value_type> << <mingridsize[0], blocksize[0]>>> (dst_ptr, tmp, groupsize, offset, totalsize); //block moves gpuErrorCheck(hipGetLastError() ); gpuErrorCheck(hipDeviceSynchronize() ); int SMem = blocksize[1] * 
sizeof(value_type); cuda_blockmove<pointer, value_type> << <mingridsize[0], blocksize[1], SMem>>> (dst_ptr, dst_ptr, groupsize, offset, totalsize); gpuErrorCheck(hipGetLastError() ); gpuErrorCheck(hipDeviceSynchronize() ); //insert overlaps cuda_overlapinsert<pointer, value_type> << <mingridsize[0], blocksize[2]>>> (dst_ptr, tmp, groupsize, offset, totalsize); gpuErrorCheck(hipGetLastError() ); gpuErrorCheck(hipDeviceSynchronize() ); }; }//end Memory }//end Flamingo #endif
f80fa5beadbdb34096362b1db8dd079cc46e528f.cu
// cuda_functions.cu #ifndef CUDA_FUNCTIONS_ALLOCATOR_H #define CUDA_FUNCTIONS_ALLOCATOR_H #include <cstdlib> #include <cstring> #include <new> #include <cuda.h> #include <algorithm> #include <cuda_occupancy.h> #include <Handle.cpp> #include <cstring> #include <cmath> #include <type_traits> #include <thrust/iterator/reverse_iterator.h> #include <sys/unistd.h> #include "exceptions.cpp" #include <iostream> namespace Flamingo{ namespace Memory{ template <typename pointer, typename Item> __global__ void cuda_fill(pointer dst, int count, Item item) { int idx = blockIdx.x * blockDim.x + threadIdx.x; for (int i = idx; i < count; i += gridDim.x * blockDim.x) { *(dst + i) = item; } } enum IndexType { OVERLAP, BLOCK }; template <IndexType E> __device__ __host__ int getSourceIndex(int tid, int block, int offset) { return 0; } template <> __device__ __host__ int getSourceIndex<OVERLAP>(int tid, int block, int offset) { return block * ((tid) / offset) + (tid % offset) + block; } template <> __device__ __host__ int getSourceIndex<BLOCK>(int tid, int block, int offset) { return block * ((tid) / (block - offset)) + (tid % (block - offset)) + offset; } template <typename T, typename U> __global__ void cuda_overlapextract(T src, U* tmp, int block, int off, int count) { int bid = blockIdx.x * blockDim.x; int tid = bid + threadIdx.x; //negative just reverse iterations //tmp_index int src_index = getSourceIndex<OVERLAP>(tid, block, off); int tmp_index = tid; int stride_src = blockDim.x * block; int stride_dst = blockDim.x * off; int i = 0; while (src_index < count) { tmp[tmp_index] = src[src_index]; i++; tmp_index += stride_dst * i; src_index += stride_src * i; }; }; template <typename T, typename L> __global__ void cuda_blockmove(T dst, T src, int block, int off, int count) { int bid = blockIdx.x * blockDim.x; int tid = bid + threadIdx.x; extern __shared__ char share[]; L* share_ptr=(L*)(&share); int src_index; int shared_index; int dst_index; //negatice just revese iterations src_index = getSourceIndex<BLOCK>(tid, block, off); shared_index = tid; dst_index = src_index - off; int stride = blockDim.x + off; L tmp; int i = 0; int dst_or = dst_index; int shared_or = shared_index; while (src_index < count) { tmp = src[src_index]; share_ptr[shared_index] = tmp; i++; src_index += stride; shared_index += blockDim.x; }; __syncthreads(); i = 0; dst_index = dst_or; shared_index = shared_or; while (dst_index < count) { tmp = share_ptr[shared_index]; dst[dst_index] = tmp; i--; shared_index += blockDim.x; dst_index += stride; }; }; template <typename T, typename U> __global__ void cuda_overlapinsert(T dst, U* tmp, int block, int off, int count) { int bid = blockIdx.x * blockDim.x; int tid = bid + threadIdx.x; //negative just revesre iteration int src_index = tid; int dst_index = getSourceIndex<OVERLAP>(tid, block, off) - off; int stride = blockDim.x * off; int i = 0; while (dst_index + off < count) { dst[dst_index] = tmp[src_index]; i++; src_index += stride * i; dst_index += stride * i; } }; template <typename pointer, typename value_type> void cuda_memmove(pointer src_ptr, pointer dst_ptr, value_type* tmp, int groupsize, int offset, int totalsize, int* mingridsize, int* blocksize) { gpuErrorCheck(cudaGetLastError() ); cuda_overlapextract<pointer, value_type> << <mingridsize[0], blocksize[0]>>> (dst_ptr, tmp, groupsize, offset, totalsize); //block moves gpuErrorCheck(cudaGetLastError() ); gpuErrorCheck(cudaDeviceSynchronize() ); int SMem = blocksize[1] * sizeof(value_type); cuda_blockmove<pointer, value_type> << 
<mingridsize[0], blocksize[1], SMem>>> (dst_ptr, dst_ptr, groupsize, offset, totalsize); gpuErrorCheck(cudaGetLastError() ); gpuErrorCheck(cudaDeviceSynchronize() ); //insert overlaps cuda_overlapinsert<pointer, value_type> << <mingridsize[0], blocksize[2]>>> (dst_ptr, tmp, groupsize, offset, totalsize); gpuErrorCheck(cudaGetLastError() ); gpuErrorCheck(cudaDeviceSynchronize() ); }; }//end Memory }//end Flamingo #endif
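For readability, here is the index arithmetic that getSourceIndex encodes, written out with b = block and o = offset (this is only a restatement of the device code above, not an addition to it):

    src_OVERLAP(t) = b \left\lfloor t / o \right\rfloor + (t \bmod o) + b
    src_BLOCK(t)   = b \left\lfloor t / (b - o) \right\rfloor + (t \bmod (b - o)) + o

cuda_memmove then chains the three kernels (extract the overlapped elements into tmp, shift the remaining block contents by o through shared memory, and re-insert the saved elements), with an error check and a device synchronization between stages.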
a7bb9b454999971d98124773ce1af71f1bcab89d.hip
// !!! This is a file automatically generated by hipify!!! /* ### PROGRAM DESCIPTION ### ## Assigment No. 06 */ #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include< iostream> #include <ctime> using namespace std; /* #CUDA PROGRAM STRUCTURE 1. Memory Allocation on CPU and GPU 2. Initialization of Memory in CPU 3. Memcpy to GPU 4. Kernel Invocation 5. Memcpy to CPU */ //----------------[START] CUDA KERNEL CODE --------------------------- __global__ void MulKernel(int *A, int *B, int *C,int WIDTH, int HEIGHT) { const int Mat_SIZE = WIDTH * HEIGHT; int col = ( blockDim.x * blockIdx.x) + threadIdx.x; int row = ( blockDim.y * blockIdx.y) + threadIdx.y; //int index = col + ( WIDTH * row); if( row < HEIGHT && col < WIDTH){ int value = 0; // will run calculate product for a pixel for(int i = 0; i<WIDTH; i++){ value += A[ (row * HEIGHT) + i ] * B [ (i * WIDTH) + col ]; } C[row*WIDTH+col] = value; } } //---------------- [END] CUDA KERNEL CODE ---------------------------- //will show the memory used by one array i.e a/b/c // viewMemoryUse( WIDTH, HEIGHT) : specific to this program void viewMemUse(int, int); // IniArray( ARRAY, WIDTH, HEIGHT, RandomValueSeed) void initializeArray(int*, int, int, int); // DisplayArray( arrayNAme i.e H_A, array, width, height) void displayArray(char*, int *,int,int); // void mulKernelCPU( int*, int*, int*, int, int); // void compareResult( int *arrayA, int *arrayB, int width, int height); int main() { const int WIDTH = 2048; const int HEIGHT = 2048; int *h_a, *h_b, *h_c; int *d_a, *d_b, *d_c; //## 1. Memory Allocation on HOST & DEVICE //1.a Memory allocation on HOST int SIZE_IN_BYTES = WIDTH * HEIGHT * sizeof(int); h_a = (int *) malloc( SIZE_IN_BYTES); // since square matrix so A = [ WIDTH * WIDTH] h_b = (int *) malloc( SIZE_IN_BYTES); h_c = (int *) malloc( SIZE_IN_BYTES); //1.b Memory Allocation on DEVICE hipMalloc( (void **) &d_a, SIZE_IN_BYTES); hipMalloc( (void **) &d_b, SIZE_IN_BYTES); hipMalloc( (void **) &d_c, SIZE_IN_BYTES); //## 2. Memory Initialization HOST //Initializing Host Arrays initializeArray( h_a, WIDTH, HEIGHT, 50); initializeArray( h_b, WIDTH, HEIGHT, 30); //## 3. Memcpy HOST to DEVICE hipMemcpy( d_a, h_a, SIZE_IN_BYTES, hipMemcpyHostToDevice); hipMemcpy( d_b, h_b, SIZE_IN_BYTES, hipMemcpyHostToDevice); //## 4. Kernel Invocation int mat_size= WIDTH * HEIGHT; int threadX = 32; int threadY = 32; int blockX = ceil( WIDTH/threadX) +1; int blockY = ceil( HEIGHT/threadY) +1; dim3 dimBlock( threadX, threadY, 1); dim3 dimGrid( blockX, blockY, 1); hipLaunchKernelGGL(( MulKernel), dim3(dimGrid), dim3(dimBlock), 0, 0, d_a, d_b, d_c, WIDTH, HEIGHT); ////## 5. 
Memcpy DEVICE to HOST hipMemcpy( h_c, d_c, SIZE_IN_BYTES, hipMemcpyDeviceToHost); // for comparing results int *cpu_results; // to store CPU results cpu_results = (int *) malloc( SIZE_IN_BYTES); //mulKernelCPU( h_a, h_b, cpu_results, WIDTH, HEIGHT); // Displaying Result cout<<"Comparing and Displaying Result"<<endl; //compareResult( h_c, cpu_results, WIDTH, HEIGHT); cout<<endl<<"Showing some data : "<<endl; displayArray( "a",h_a, 5, 5); displayArray("b", h_b, 5, 5); displayArray("c",h_c, 5, 5); //displayArray("cpu_result",cpu_results,2,2); hipFree(&d_a); hipFree(&d_b); hipFree(&d_c); free(h_a); free(h_b); free(h_c); system("pause"); return 0; } void mulKernelCPU( int *arrayA, int *arrayB, int *arrayC, int width, int height){ int arraySize = width * height; for(int i=0; i<arraySize; i++){ int value; for( int j=0; j<arraySize; j++){ value = arrayA[j * height + j ] * arrayB[i * width + i]; } int index = 0; arrayC[ index ] = value; } } /* value = A[ (row * HEIGHT) + i ] * B [ (i * WIDTH) + col ]; } C[index] = value; */ void compareResult( int *arrayA, int *arrayB, int width, int height){ int arraySize = width * height; for(int i=0; i<arraySize; i++){ if( arrayA[i] != arrayB[i]){ cout<<"arrayA["<<i<<"] != arrayB["<<i<<"]"<<endl; cout<<"[NOT SAME] Result on CPU and GPU is not same"<<endl; break; } if (i ==arraySize-1){ cout<<"Result on CPU and GPU is same"<<endl; } } } void initializeArray(int *array, int width, int height, int randomValueSEED){ int MAT_SIZE = width * height; // Initializing Array with random values srand ( time(NULL) ); for( int i=0; i<MAT_SIZE; i++){ int value = rand() % randomValueSEED + 1; array[i] = value; } } void displayArray(char* arrayName,int *array,int width, int height){ cout<<"Displaying Values of Array: "<<arrayName<<endl; for(int i=0; i<width*height; i++){ if( i % width == 0) cout<<endl; //cout<<"Array["<<i<<"] : "<<array[i]<<" "; cout<<"["<<i<<"] : "<<array[i]<<" "; } cout<<endl; } void viewMemUse(int pWidth, int pHeight){ int size = pWidth * pHeight * sizeof(int); cout<<"Size: of WIDTH * HEIGHT * sizeof(int)"<<endl; cout<<"Size = "<<pWidth<<" * "<<pHeight<<" * sizeof(int)"<<endl; cout<<"Size: BYTES "<<size<<endl; cout<<"Size: KBYTES "<<size/1024<<endl; cout<<"Size: MBYTES "<<(size/1024)/1024<<endl; float gSize = ((size/1024.0)/1024.0)/1024.0; cout<<"Size: GBYTES "<<gSize<<endl; }
a7bb9b454999971d98124773ce1af71f1bcab89d.cu
/* ### PROGRAM DESCIPTION ### ## Assigment No. 06 */ #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <stdlib.h> #include< iostream> #include <ctime> using namespace std; /* #CUDA PROGRAM STRUCTURE 1. Memory Allocation on CPU and GPU 2. Initialization of Memory in CPU 3. Memcpy to GPU 4. Kernel Invocation 5. Memcpy to CPU */ //----------------[START] CUDA KERNEL CODE --------------------------- __global__ void MulKernel(int *A, int *B, int *C,int WIDTH, int HEIGHT) { const int Mat_SIZE = WIDTH * HEIGHT; int col = ( blockDim.x * blockIdx.x) + threadIdx.x; int row = ( blockDim.y * blockIdx.y) + threadIdx.y; //int index = col + ( WIDTH * row); if( row < HEIGHT && col < WIDTH){ int value = 0; // will run calculate product for a pixel for(int i = 0; i<WIDTH; i++){ value += A[ (row * HEIGHT) + i ] * B [ (i * WIDTH) + col ]; } C[row*WIDTH+col] = value; } } //---------------- [END] CUDA KERNEL CODE ---------------------------- //will show the memory used by one array i.e a/b/c // viewMemoryUse( WIDTH, HEIGHT) : specific to this program void viewMemUse(int, int); // IniArray( ARRAY, WIDTH, HEIGHT, RandomValueSeed) void initializeArray(int*, int, int, int); // DisplayArray( arrayNAme i.e H_A, array, width, height) void displayArray(char*, int *,int,int); // void mulKernelCPU( int*, int*, int*, int, int); // void compareResult( int *arrayA, int *arrayB, int width, int height); int main() { const int WIDTH = 2048; const int HEIGHT = 2048; int *h_a, *h_b, *h_c; int *d_a, *d_b, *d_c; //## 1. Memory Allocation on HOST & DEVICE //1.a Memory allocation on HOST int SIZE_IN_BYTES = WIDTH * HEIGHT * sizeof(int); h_a = (int *) malloc( SIZE_IN_BYTES); // since square matrix so A = [ WIDTH * WIDTH] h_b = (int *) malloc( SIZE_IN_BYTES); h_c = (int *) malloc( SIZE_IN_BYTES); //1.b Memory Allocation on DEVICE cudaMalloc( (void **) &d_a, SIZE_IN_BYTES); cudaMalloc( (void **) &d_b, SIZE_IN_BYTES); cudaMalloc( (void **) &d_c, SIZE_IN_BYTES); //## 2. Memory Initialization HOST //Initializing Host Arrays initializeArray( h_a, WIDTH, HEIGHT, 50); initializeArray( h_b, WIDTH, HEIGHT, 30); //## 3. Memcpy HOST to DEVICE cudaMemcpy( d_a, h_a, SIZE_IN_BYTES, cudaMemcpyHostToDevice); cudaMemcpy( d_b, h_b, SIZE_IN_BYTES, cudaMemcpyHostToDevice); //## 4. Kernel Invocation int mat_size= WIDTH * HEIGHT; int threadX = 32; int threadY = 32; int blockX = ceil( WIDTH/threadX) +1; int blockY = ceil( HEIGHT/threadY) +1; dim3 dimBlock( threadX, threadY, 1); dim3 dimGrid( blockX, blockY, 1); MulKernel<<<dimGrid, dimBlock>>>(d_a, d_b, d_c, WIDTH, HEIGHT); ////## 5. 
Memcpy DEVICE to HOST cudaMemcpy( h_c, d_c, SIZE_IN_BYTES, cudaMemcpyDeviceToHost); // for comparing results int *cpu_results; // to store CPU results cpu_results = (int *) malloc( SIZE_IN_BYTES); //mulKernelCPU( h_a, h_b, cpu_results, WIDTH, HEIGHT); // Displaying Result cout<<"Comparing and Displaying Result"<<endl; //compareResult( h_c, cpu_results, WIDTH, HEIGHT); cout<<endl<<"Showing some data : "<<endl; displayArray( "a",h_a, 5, 5); displayArray("b", h_b, 5, 5); displayArray("c",h_c, 5, 5); //displayArray("cpu_result",cpu_results,2,2); cudaFree(&d_a); cudaFree(&d_b); cudaFree(&d_c); free(h_a); free(h_b); free(h_c); system("pause"); return 0; } void mulKernelCPU( int *arrayA, int *arrayB, int *arrayC, int width, int height){ int arraySize = width * height; for(int i=0; i<arraySize; i++){ int value; for( int j=0; j<arraySize; j++){ value = arrayA[j * height + j ] * arrayB[i * width + i]; } int index = 0; arrayC[ index ] = value; } } /* value = A[ (row * HEIGHT) + i ] * B [ (i * WIDTH) + col ]; } C[index] = value; */ void compareResult( int *arrayA, int *arrayB, int width, int height){ int arraySize = width * height; for(int i=0; i<arraySize; i++){ if( arrayA[i] != arrayB[i]){ cout<<"arrayA["<<i<<"] != arrayB["<<i<<"]"<<endl; cout<<"[NOT SAME] Result on CPU and GPU is not same"<<endl; break; } if (i ==arraySize-1){ cout<<"Result on CPU and GPU is same"<<endl; } } } void initializeArray(int *array, int width, int height, int randomValueSEED){ int MAT_SIZE = width * height; // Initializing Array with random values srand ( time(NULL) ); for( int i=0; i<MAT_SIZE; i++){ int value = rand() % randomValueSEED + 1; array[i] = value; } } void displayArray(char* arrayName,int *array,int width, int height){ cout<<"Displaying Values of Array: "<<arrayName<<endl; for(int i=0; i<width*height; i++){ if( i % width == 0) cout<<endl; //cout<<"Array["<<i<<"] : "<<array[i]<<" "; cout<<"["<<i<<"] : "<<array[i]<<" "; } cout<<endl; } void viewMemUse(int pWidth, int pHeight){ int size = pWidth * pHeight * sizeof(int); cout<<"Size: of WIDTH * HEIGHT * sizeof(int)"<<endl; cout<<"Size = "<<pWidth<<" * "<<pHeight<<" * sizeof(int)"<<endl; cout<<"Size: BYTES "<<size<<endl; cout<<"Size: KBYTES "<<size/1024<<endl; cout<<"Size: MBYTES "<<(size/1024)/1024<<endl; float gSize = ((size/1024.0)/1024.0)/1024.0; cout<<"Size: GBYTES "<<gSize<<endl; }
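One detail of the launch configuration in main() above is worth spelling out: WIDTH and threadX are ints, so ceil(WIDTH/threadX) truncates before ceil ever sees a fraction, and it is the trailing +1 that actually rounds up, at the price of one extra block row and column, which the row/col guard in MulKernel makes harmless. A common exact-fit alternative is plain integer ceiling division; the snippet below is only an illustrative variant reusing the names from main(), not a change to the assignment code:

    // Hypothetical variant of the grid-size computation in main()
    const int WIDTH = 2048, HEIGHT = 2048;          // as in main() above
    int threadX = 32, threadY = 32;
    int blockX = (WIDTH + threadX - 1) / threadX;   // rounds up without over-allocating
    int blockY = (HEIGHT + threadY - 1) / threadY;
    dim3 dimBlock(threadX, threadY, 1);
    dim3 dimGrid(blockX, blockY, 1);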
58b25806193600852b73535abdd08a6382d1170e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" namespace oneflow { namespace { template<typename T> __global__ void SmoothL1LossForward(const int64_t elem_cnt, const T* prediction, const T* label, const T beta, T* loss) { const T half_beta = static_cast<T>(0.5) * beta; const T point5_div_beta = static_cast<T>(0.5) / beta; CUDA_1D_KERNEL_LOOP(i, elem_cnt) { const T abs_diff = std::abs(prediction[i] - label[i]); if (abs_diff < beta) { loss[i] = abs_diff * abs_diff * point5_div_beta; } else { loss[i] = abs_diff - half_beta; } } } template<typename T> __global__ void SmoothL1LossBackward(const int64_t elem_cnt, const T* loss_grad, const T* prediction, const T* label, const T beta, T* prediction_grad) { CUDA_1D_KERNEL_LOOP(i, elem_cnt) { const T diff = prediction[i] - label[i]; const T abs_diff = std::abs(diff); if (abs_diff < beta) { prediction_grad[i] = diff / beta * loss_grad[i]; } else { prediction_grad[i] = ((diff > GetZeroVal<T>()) - (diff < GetZeroVal<T>())) * loss_grad[i]; } } } } // namespace template<typename T> class SmoothL1LossGPUKernel final : public user_op::OpKernel { public: SmoothL1LossGPUKernel() = default; ~SmoothL1LossGPUKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const float beta = ctx->Attr<float>("beta"); const user_op::Tensor* prediction_blob = ctx->Tensor4ArgNameAndIndex("prediction", 0); const T* prediction = prediction_blob->dptr<T>(); const int64_t elem_cnt = prediction_blob->shape().elem_cnt(); const T* label = ctx->Tensor4ArgNameAndIndex("label", 0)->dptr<T>(); T* loss = ctx->Tensor4ArgNameAndIndex("loss", 0)->mut_dptr<T>(); hipLaunchKernelGGL(( SmoothL1LossForward<T>) , dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(), elem_cnt, prediction, label, beta, loss); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_SMOOTH_L1_LOSS_GPU_KERNEL(dtype) \ REGISTER_USER_KERNEL("smooth_l1_loss") \ .SetCreateFn<SmoothL1LossGPUKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \ & (user_op::HobDataType("loss", 0) == GetDataType<dtype>::value)); REGISTER_SMOOTH_L1_LOSS_GPU_KERNEL(float) REGISTER_SMOOTH_L1_LOSS_GPU_KERNEL(double) template<typename T> class SmoothL1LossGradGpuKernel final : public user_op::OpKernel { public: SmoothL1LossGradGpuKernel() = default; ~SmoothL1LossGradGpuKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const float beta = ctx->Attr<float>("beta"); const user_op::Tensor* prediction_blob = ctx->Tensor4ArgNameAndIndex("prediction", 0); const T* prediction = prediction_blob->dptr<T>(); const int64_t elem_cnt = prediction_blob->shape().elem_cnt(); const T* loss_grad = ctx->Tensor4ArgNameAndIndex("loss_grad", 0)->dptr<T>(); const T* label = ctx->Tensor4ArgNameAndIndex("label", 
0)->dptr<T>(); T* prediction_grad = ctx->Tensor4ArgNameAndIndex("prediction_grad", 0)->mut_dptr<T>(); hipLaunchKernelGGL(( SmoothL1LossBackward<T>), dim3(BlocksNum4ThreadsNum(elem_cnt)), dim3(kCudaThreadsNumPerBlock), 0, ctx->device_ctx()->cuda_stream(), elem_cnt, loss_grad, prediction, label, beta, prediction_grad); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_SMOOTH_L1_LOSS_GRAD_GPU_KERNEL(dtype) \ REGISTER_USER_KERNEL("smooth_l1_loss_grad") \ .SetCreateFn<SmoothL1LossGradGpuKernel<dtype>>() \ .SetIsMatchedHob( \ (user_op::HobDeviceType() == DeviceType::kGPU) \ & (user_op::HobDataType("prediction_grad", 0) == GetDataType<dtype>::value)); REGISTER_SMOOTH_L1_LOSS_GRAD_GPU_KERNEL(float) REGISTER_SMOOTH_L1_LOSS_GRAD_GPU_KERNEL(double) } // namespace oneflow
58b25806193600852b73535abdd08a6382d1170e.cu
/* Copyright 2020 The OneFlow Authors. All rights reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #include "oneflow/core/framework/framework.h" namespace oneflow { namespace { template<typename T> __global__ void SmoothL1LossForward(const int64_t elem_cnt, const T* prediction, const T* label, const T beta, T* loss) { const T half_beta = static_cast<T>(0.5) * beta; const T point5_div_beta = static_cast<T>(0.5) / beta; CUDA_1D_KERNEL_LOOP(i, elem_cnt) { const T abs_diff = std::abs(prediction[i] - label[i]); if (abs_diff < beta) { loss[i] = abs_diff * abs_diff * point5_div_beta; } else { loss[i] = abs_diff - half_beta; } } } template<typename T> __global__ void SmoothL1LossBackward(const int64_t elem_cnt, const T* loss_grad, const T* prediction, const T* label, const T beta, T* prediction_grad) { CUDA_1D_KERNEL_LOOP(i, elem_cnt) { const T diff = prediction[i] - label[i]; const T abs_diff = std::abs(diff); if (abs_diff < beta) { prediction_grad[i] = diff / beta * loss_grad[i]; } else { prediction_grad[i] = ((diff > GetZeroVal<T>()) - (diff < GetZeroVal<T>())) * loss_grad[i]; } } } } // namespace template<typename T> class SmoothL1LossGPUKernel final : public user_op::OpKernel { public: SmoothL1LossGPUKernel() = default; ~SmoothL1LossGPUKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const float beta = ctx->Attr<float>("beta"); const user_op::Tensor* prediction_blob = ctx->Tensor4ArgNameAndIndex("prediction", 0); const T* prediction = prediction_blob->dptr<T>(); const int64_t elem_cnt = prediction_blob->shape().elem_cnt(); const T* label = ctx->Tensor4ArgNameAndIndex("label", 0)->dptr<T>(); T* loss = ctx->Tensor4ArgNameAndIndex("loss", 0)->mut_dptr<T>(); SmoothL1LossForward<T> <<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(elem_cnt, prediction, label, beta, loss); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_SMOOTH_L1_LOSS_GPU_KERNEL(dtype) \ REGISTER_USER_KERNEL("smooth_l1_loss") \ .SetCreateFn<SmoothL1LossGPUKernel<dtype>>() \ .SetIsMatchedHob((user_op::HobDeviceType() == DeviceType::kGPU) \ & (user_op::HobDataType("loss", 0) == GetDataType<dtype>::value)); REGISTER_SMOOTH_L1_LOSS_GPU_KERNEL(float) REGISTER_SMOOTH_L1_LOSS_GPU_KERNEL(double) template<typename T> class SmoothL1LossGradGpuKernel final : public user_op::OpKernel { public: SmoothL1LossGradGpuKernel() = default; ~SmoothL1LossGradGpuKernel() = default; private: void Compute(user_op::KernelComputeContext* ctx) const override { const float beta = ctx->Attr<float>("beta"); const user_op::Tensor* prediction_blob = ctx->Tensor4ArgNameAndIndex("prediction", 0); const T* prediction = prediction_blob->dptr<T>(); const int64_t elem_cnt = prediction_blob->shape().elem_cnt(); const T* loss_grad = ctx->Tensor4ArgNameAndIndex("loss_grad", 0)->dptr<T>(); const T* label = ctx->Tensor4ArgNameAndIndex("label", 0)->dptr<T>(); T* prediction_grad = ctx->Tensor4ArgNameAndIndex("prediction_grad", 0)->mut_dptr<T>(); 
SmoothL1LossBackward<T><<<BlocksNum4ThreadsNum(elem_cnt), kCudaThreadsNumPerBlock, 0, ctx->device_ctx()->cuda_stream()>>>(elem_cnt, loss_grad, prediction, label, beta, prediction_grad); } bool AlwaysComputeWhenAllOutputsEmpty() const override { return false; } }; #define REGISTER_SMOOTH_L1_LOSS_GRAD_GPU_KERNEL(dtype) \ REGISTER_USER_KERNEL("smooth_l1_loss_grad") \ .SetCreateFn<SmoothL1LossGradGpuKernel<dtype>>() \ .SetIsMatchedHob( \ (user_op::HobDeviceType() == DeviceType::kGPU) \ & (user_op::HobDataType("prediction_grad", 0) == GetDataType<dtype>::value)); REGISTER_SMOOTH_L1_LOSS_GRAD_GPU_KERNEL(float) REGISTER_SMOOTH_L1_LOSS_GRAD_GPU_KERNEL(double) } // namespace oneflow
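Written out as a formula, the forward kernel above computes the beta-smoothed L1 (Huber-style) loss elementwise, and the backward kernel applies its derivative scaled by the incoming gradient. With d_i = prediction_i - label_i:

    \ell_i = \begin{cases} d_i^2 / (2\beta) & \text{if } |d_i| < \beta \\ |d_i| - \beta/2 & \text{otherwise} \end{cases}
    \qquad
    \frac{\partial \ell_i}{\partial \mathrm{prediction}_i} = \begin{cases} d_i / \beta & \text{if } |d_i| < \beta \\ \operatorname{sign}(d_i) & \text{otherwise} \end{cases}

which matches the half_beta = 0.5*beta and point5_div_beta = 0.5/beta constants precomputed at the top of each kernel.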
e21c380838b412125cfd334986783f9ab64ca273.hip
// !!! This is a file automatically generated by hipify!!!
#include <stdio.h>
#include <hip/hip_runtime.h>
#include <math.h>
#include <stdlib.h>

#define BLOCK_SIZE 32

// Print the matrix
void show(float* a, int n) {
    for(int i = 0; i < n; i++) {
        for(int j = 0; j < n; j++) {
            fprintf(stdout, "%g\t", a[j + i * n]);
        }
        fprintf(stdout, "\n");
    }
    fprintf(stdout, "\n\n\n");
}

__global__ void matMult_global(float *a, float *b, int n, float* c) {
    int i = threadIdx.x + blockIdx.x * blockDim.x; // One thread -> one element of the matrix
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    float sum = 0.0f;
    for (int k = 0; k < n; ++k)
        sum += a[i * n + k] * b[k * n + j];
    c[i * n + j] = sum;
}

__global__ void matMult_shared(float *a, float *b, int n, float* c) {
    int aBegin = n * BLOCK_SIZE * blockIdx.y; // Index of the first sub-matrix of A processed by this block
    int aEnd = aBegin + n - 1;                // Index of the end of A's sub-matrices for this block
    int aStep = BLOCK_SIZE;                   // Step used to iterate over the sub-matrices of A
    int bBegin = BLOCK_SIZE * blockIdx.x;     // Index of the first sub-matrix of B processed by this block
    int bStep = BLOCK_SIZE * n;               // Step used to iterate over the sub-matrices of B
    float sum = 0.0f;                         // Accumulated element of the output sub-matrix
    for (int sub_A = aBegin, sub_B = bBegin; sub_A <= aEnd; sub_A += aStep, sub_B += bStep) {
        // Shared memory for the sub-matrices
        __shared__ float buffer_A[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float buffer_B[BLOCK_SIZE][BLOCK_SIZE];
        // Load the sub-matrices of A and B from global memory into shared memory
        buffer_A[threadIdx.y][threadIdx.x] = a[sub_A + n * threadIdx.y + threadIdx.x];
        buffer_B[threadIdx.y][threadIdx.x] = b[sub_B + n * threadIdx.y + threadIdx.x];
        __syncthreads(); // Make sure the sub-matrices are fully loaded
        // Multiply the two sub-matrices (row of the A tile times column of the B tile)
        for (int k = 0; k < BLOCK_SIZE; k++)
            sum += buffer_A[threadIdx.y][k] * buffer_B[k][threadIdx.x];
        __syncthreads(); // Make sure no thread still needs the sub-matrices
    }
    int cBegin = n * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x; // Index of this block's tile of C in global memory
    c[cBegin + n * threadIdx.y + threadIdx.x] = sum; // Write the element to global memory
}

int main (int argc, char * argv []) {
    // Read the matrix size
    if(argc < 2){
        fprintf(stderr, "Enter the matrix size: ");
        return -1;
    }
    // Validate the input
    int N = atoi(argv[1]);
    if(N % BLOCK_SIZE) {
        fprintf(stderr, "Change the size\n");
        return -1;
    }
    int numBytes = N * N * sizeof(float);

    // Initialize the source arrays
    // Allocate memory on the host
    float *a = (float*)malloc(numBytes); // Matrix A
    float *b = (float*)malloc(numBytes); // Matrix B
    float *c = (float*)malloc(numBytes); // Matrix C

    // Fill the matrices
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++) {
            int k = N * i + j;
            a[k] = 2.0f;
            b[k] = 1.0f;
        }

    float *adevice, *bdevice, *cdevice;
    // Allocate memory on the GPU
    hipMalloc((void**)&adevice, numBytes);
    hipMalloc((void**)&bdevice, numBytes);
    hipMalloc((void**)&cdevice, numBytes);

    // Set up the grid of threads and blocks
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 blocks(N / threads.x, N / threads.y);

    // Copy matrices A and B from host to device
    // (they are initialized on the CPU -> copy to the GPU)
    hipMemcpy(adevice, a, numBytes, hipMemcpyHostToDevice);
    hipMemcpy(bdevice, b, numBytes, hipMemcpyHostToDevice);

    // Launch the kernel
    hipLaunchKernelGGL(( matMult_global), dim3(blocks), dim3(threads), 0, 0, adevice, bdevice, N, cdevice);
    hipDeviceSynchronize();

    // Copy the computed matrix C from device to host (bring the result back)
    hipMemcpy(c, cdevice, numBytes, hipMemcpyDeviceToHost);

    hipMemcpy(adevice, a, numBytes, hipMemcpyHostToDevice);
    hipMemcpy(bdevice, b, numBytes, hipMemcpyHostToDevice);

    hipLaunchKernelGGL(( matMult_shared), dim3(blocks), dim3(threads), 0, 0, adevice, bdevice, N, cdevice);
    hipDeviceSynchronize();
    hipMemcpy(c, cdevice, numBytes, hipMemcpyDeviceToHost);

    // Free memory on the GPU and the CPU
    hipFree(adevice);
    hipFree(bdevice);
    hipFree(cdevice);
    free(a);
    free(b);
    free(c);
    return 0;
}
e21c380838b412125cfd334986783f9ab64ca273.cu
#include <stdio.h>
#include <cuda.h>
#include <math.h>
#include <stdlib.h>

#define BLOCK_SIZE 32

// Print the matrix
void show(float* a, int n) {
    for(int i = 0; i < n; i++) {
        for(int j = 0; j < n; j++) {
            fprintf(stdout, "%g\t", a[j + i * n]);
        }
        fprintf(stdout, "\n");
    }
    fprintf(stdout, "\n\n\n");
}

__global__ void matMult_global(float *a, float *b, int n, float* c) {
    int i = threadIdx.x + blockIdx.x * blockDim.x; // One thread -> one element of the matrix
    int j = threadIdx.y + blockIdx.y * blockDim.y;
    float sum = 0.0f;
    for (int k = 0; k < n; ++k)
        sum += a[i * n + k] * b[k * n + j];
    c[i * n + j] = sum;
}

__global__ void matMult_shared(float *a, float *b, int n, float* c) {
    int aBegin = n * BLOCK_SIZE * blockIdx.y; // Index of the first sub-matrix of A processed by this block
    int aEnd = aBegin + n - 1;                // Index of the end of A's sub-matrices for this block
    int aStep = BLOCK_SIZE;                   // Step used to iterate over the sub-matrices of A
    int bBegin = BLOCK_SIZE * blockIdx.x;     // Index of the first sub-matrix of B processed by this block
    int bStep = BLOCK_SIZE * n;               // Step used to iterate over the sub-matrices of B
    float sum = 0.0f;                         // Accumulated element of the output sub-matrix
    for (int sub_A = aBegin, sub_B = bBegin; sub_A <= aEnd; sub_A += aStep, sub_B += bStep) {
        // Shared memory for the sub-matrices
        __shared__ float buffer_A[BLOCK_SIZE][BLOCK_SIZE];
        __shared__ float buffer_B[BLOCK_SIZE][BLOCK_SIZE];
        // Load the sub-matrices of A and B from global memory into shared memory
        buffer_A[threadIdx.y][threadIdx.x] = a[sub_A + n * threadIdx.y + threadIdx.x];
        buffer_B[threadIdx.y][threadIdx.x] = b[sub_B + n * threadIdx.y + threadIdx.x];
        __syncthreads(); // Make sure the sub-matrices are fully loaded
        // Multiply the two sub-matrices (row of the A tile times column of the B tile)
        for (int k = 0; k < BLOCK_SIZE; k++)
            sum += buffer_A[threadIdx.y][k] * buffer_B[k][threadIdx.x];
        __syncthreads(); // Make sure no thread still needs the sub-matrices
    }
    int cBegin = n * BLOCK_SIZE * blockIdx.y + BLOCK_SIZE * blockIdx.x; // Index of this block's tile of C in global memory
    c[cBegin + n * threadIdx.y + threadIdx.x] = sum; // Write the element to global memory
}

int main (int argc, char * argv []) {
    // Read the matrix size
    if(argc < 2){
        fprintf(stderr, "Enter the matrix size: ");
        return -1;
    }
    // Validate the input
    int N = atoi(argv[1]);
    if(N % BLOCK_SIZE) {
        fprintf(stderr, "Change the size\n");
        return -1;
    }
    int numBytes = N * N * sizeof(float);

    // Initialize the source arrays
    // Allocate memory on the host
    float *a = (float*)malloc(numBytes); // Matrix A
    float *b = (float*)malloc(numBytes); // Matrix B
    float *c = (float*)malloc(numBytes); // Matrix C

    // Fill the matrices
    for (int i = 0; i < N; i++)
        for (int j = 0; j < N; j++) {
            int k = N * i + j;
            a[k] = 2.0f;
            b[k] = 1.0f;
        }

    float *adevice, *bdevice, *cdevice;
    // Allocate memory on the GPU
    cudaMalloc((void**)&adevice, numBytes);
    cudaMalloc((void**)&bdevice, numBytes);
    cudaMalloc((void**)&cdevice, numBytes);

    // Set up the grid of threads and blocks
    dim3 threads(BLOCK_SIZE, BLOCK_SIZE);
    dim3 blocks(N / threads.x, N / threads.y);

    // Copy matrices A and B from host to device
    // (they are initialized on the CPU -> copy to the GPU)
    cudaMemcpy(adevice, a, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(bdevice, b, numBytes, cudaMemcpyHostToDevice);

    // Launch the kernel
    matMult_global<<<blocks, threads>>>(adevice, bdevice, N, cdevice);
    cudaDeviceSynchronize();

    // Copy the computed matrix C from device to host (bring the result back)
    cudaMemcpy(c, cdevice, numBytes, cudaMemcpyDeviceToHost);

    cudaMemcpy(adevice, a, numBytes, cudaMemcpyHostToDevice);
    cudaMemcpy(bdevice, b, numBytes, cudaMemcpyHostToDevice);

    matMult_shared<<<blocks, threads>>>(adevice, bdevice, N, cdevice);
    cudaDeviceSynchronize();
    cudaMemcpy(c, cdevice, numBytes, cudaMemcpyDeviceToHost);

    // Free memory on the GPU and the CPU
    cudaFree(adevice);
    cudaFree(bdevice);
    cudaFree(cdevice);
    free(a);
    free(b);
    free(c);
    return 0;
}
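As a one-line summary of what matMult_shared computes (an editor's gloss, not part of the source): each thread block accumulates one BLOCK_SIZE-by-BLOCK_SIZE tile of the product,

    C_{IJ} = \sum_{K=0}^{n/\text{BLOCK\_SIZE} - 1} A_{IK} B_{KJ},

loading the tiles A_{IK} and B_{KJ} into shared memory once per term so that all BLOCK_SIZE x BLOCK_SIZE threads of the block reuse them; the two __syncthreads() calls bracket that reuse.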
6533aa609256a32c5205333a80fd77b0a694fdcf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/zlascl_2x2.cu, normal z -> s, Tue Aug 30 09:38:32 2016 @author Ichitaro Yamazaki */ #include "magma_internal.h" #define NB 64 #define A(i,j) (A[(i) + (j)*lda]) #define W(i,j) (W[(i) + (j)*ldw]) // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void slascl_2x2_lower( int m, const float* W, int ldw, float* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; float D21 = W( 1, 0 ); float D11 = MAGMA_S_DIV( W( 1, 1 ), D21 ); float D22 = MAGMA_S_DIV( W( 0, 0 ), MAGMA_S_CONJ( D21 ) ); float T = 1.0 / ( MAGMA_S_REAL( D11*D22 ) - 1.0 ); D21 = MAGMA_S_DIV( MAGMA_S_MAKE(T,0.0), D21 ); if (ind < m) { A( ind, 0 ) = MAGMA_S_CONJ( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) ); A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) ); } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void slascl_2x2_upper( int m, const float *W, int ldw, float* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; float D21 = W( m, 1 ); float D11 = MAGMA_S_DIV( W( m+1, 1 ), MAGMA_S_CONJ( D21 ) ); float D22 = MAGMA_S_DIV( W( m, 0 ), D21 ); float T = 1.0 / ( MAGMA_S_REAL( D11*D22 ) - 1.0 ); D21 = MAGMA_S_DIV( MAGMA_S_MAKE(T,0.0), D21 ); if (ind < m) { A( ind, 0 ) = D21*( D11*W( ind, 0 )-W( ind, 1 ) ); A( ind, 1 ) = MAGMA_S_CONJ( D21 )*( D22*W( ind, 1 )-W( ind, 0 ) ); } } /***************************************************************************//** Purpose ------- SLASCL_2x2 scales the M by M real matrix A by the 2-by-2 pivot. TYPE specifies that A may be upper or lower triangular. Arguments --------- @param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] dW REAL vector, dimension (2*lddw) The matrix containing the 2-by-2 pivot. @param[in] lddw INTEGER The leading dimension of the array W. LDDA >= max(1,M). @param[in,out] dA REAL array, dimension (LDDA,N) The matrix to be scaled by dW. See TYPE for the storage type. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_lascl_2x2 *******************************************************************************/ extern "C" void magmablas_slascl_2x2_q( magma_type_t type, magma_int_t m, magmaFloat_const_ptr dW, magma_int_t lddw, magmaFloat_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper ) *info = -1; else if ( m < 0 ) *info = -2; else if ( ldda < max(1,m) ) *info = -4; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); if (type == MagmaLower) { hipLaunchKernelGGL(( slascl_2x2_lower) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dW, lddw, dA, ldda); } else { hipLaunchKernelGGL(( slascl_2x2_upper) , dim3(grid), dim3(threads), 0, queue->cuda_stream() , m, dW, lddw, dA, ldda); } }
6533aa609256a32c5205333a80fd77b0a694fdcf.cu
/* -- MAGMA (version 2.1.0) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date August 2016 @generated from magmablas/zlascl_2x2.cu, normal z -> s, Tue Aug 30 09:38:32 2016 @author Ichitaro Yamazaki */ #include "magma_internal.h" #define NB 64 #define A(i,j) (A[(i) + (j)*lda]) #define W(i,j) (W[(i) + (j)*ldw]) // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void slascl_2x2_lower( int m, const float* W, int ldw, float* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; float D21 = W( 1, 0 ); float D11 = MAGMA_S_DIV( W( 1, 1 ), D21 ); float D22 = MAGMA_S_DIV( W( 0, 0 ), MAGMA_S_CONJ( D21 ) ); float T = 1.0 / ( MAGMA_S_REAL( D11*D22 ) - 1.0 ); D21 = MAGMA_S_DIV( MAGMA_S_MAKE(T,0.0), D21 ); if (ind < m) { A( ind, 0 ) = MAGMA_S_CONJ( D21 )*( D11*W( 2+ind, 0 )-W( 2+ind, 1 ) ); A( ind, 1 ) = D21*( D22*W( 2+ind, 1 )-W( 2+ind, 0 ) ); } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void slascl_2x2_upper( int m, const float *W, int ldw, float* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; float D21 = W( m, 1 ); float D11 = MAGMA_S_DIV( W( m+1, 1 ), MAGMA_S_CONJ( D21 ) ); float D22 = MAGMA_S_DIV( W( m, 0 ), D21 ); float T = 1.0 / ( MAGMA_S_REAL( D11*D22 ) - 1.0 ); D21 = MAGMA_S_DIV( MAGMA_S_MAKE(T,0.0), D21 ); if (ind < m) { A( ind, 0 ) = D21*( D11*W( ind, 0 )-W( ind, 1 ) ); A( ind, 1 ) = MAGMA_S_CONJ( D21 )*( D22*W( ind, 1 )-W( ind, 0 ) ); } } /***************************************************************************//** Purpose ------- SLASCL_2x2 scales the M by M real matrix A by the 2-by-2 pivot. TYPE specifies that A may be upper or lower triangular. Arguments --------- @param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. @param[in] m INTEGER The number of rows of the matrix A. M >= 0. @param[in] dW REAL vector, dimension (2*lddw) The matrix containing the 2-by-2 pivot. @param[in] lddw INTEGER The leading dimension of the array W. LDDA >= max(1,M). @param[in,out] dA REAL array, dimension (LDDA,N) The matrix to be scaled by dW. See TYPE for the storage type. @param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). @param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. @ingroup magma_lascl_2x2 *******************************************************************************/ extern "C" void magmablas_slascl_2x2_q( magma_type_t type, magma_int_t m, magmaFloat_const_ptr dW, magma_int_t lddw, magmaFloat_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper ) *info = -1; else if ( m < 0 ) *info = -2; else if ( ldda < max(1,m) ) *info = -4; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 threads( NB ); dim3 grid( magma_ceildiv( m, NB ) ); if (type == MagmaLower) { slascl_2x2_lower <<< grid, threads, 0, queue->cuda_stream() >>> (m, dW, lddw, dA, ldda); } else { slascl_2x2_upper <<< grid, threads, 0, queue->cuda_stream() >>> (m, dW, lddw, dA, ldda); } }
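Spelling out the lower-triangular branch of slascl_2x2 (a transcription of the kernel body using the W(i,j)/A(i,j) macros defined above; in this real-precision instance the conjugations are no-ops): with D21 = W(1,0), D11 = W(1,1)/D21, D22 = W(0,0)/conj(D21) and t = 1/(Re(D11 D22) - 1), every row i < m is updated as

    A(i,0) = conj(t/D21) ( D11 W(2+i,0) - W(2+i,1) )
    A(i,1) =      t/D21  ( D22 W(2+i,1) - W(2+i,0) )

so the rows below the 2-by-2 pivot are effectively multiplied through by the inverse of that pivot; the upper-triangular branch mirrors this with the pivot read from rows m and m+1 of W.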
322ec90de9eaff1ab58c7b199ccade03db96f273.hip
// !!! This is a file automatically generated by hipify!!! #include <torch/extension.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <vector> namespace { template <typename scalar_t> __global__ void relToAbsIndex2d_cuda_forward_kernel( const torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> relIndx, const torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> init_spIndx, torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> absIndx, int height, int width, int Kh, int Kw, int K) { // indexing const int n = blockIdx.y; const int d = blockIdx.x * blockDim.x + threadIdx.x; const int h = d / width; const int w = d % width; if (h < height) { // Convert spix_idx based on the rel_idx const int rel_idx = static_cast<int>(relIndx[n][0][h][w]); const int rel_idx_h = rel_idx / 3 - 1; int rel_idx_w = rel_idx % 3 - 1; const int init_spix_idx = static_cast<int>(init_spIndx[n][0][h][w]); int spix_idx_h = init_spix_idx + rel_idx_h * Kw; if (spix_idx_h >= K || spix_idx_h <= -1) { spix_idx_h = init_spix_idx; } if (((spix_idx_h + 1) % Kw) == 0 && rel_idx_w == 1) { rel_idx_w = 0; } else if ((spix_idx_h % Kw) == 0 && rel_idx_w == -1) { rel_idx_w = 0; } int spix_idx_w = spix_idx_h + rel_idx_w; if (spix_idx_w < K && spix_idx_w > -1) { absIndx[n][0][h][w] = static_cast<float>(spix_idx_w); } else { absIndx[n][0][h][w] = static_cast<float>(spix_idx_h); } } } } // namespace torch::Tensor relToAbsIndex2d_cuda_forward( const torch::Tensor relIndx, // B 1 H W const torch::Tensor init_spIndx, // B 1 H W const int Kh, const int Kw) { // setup const auto batch_size = relIndx.size(0); const auto height = relIndx.size(2); const auto width = relIndx.size(3); auto absIndx = torch::zeros_like(relIndx); // B 1 H W // launch kernel const int threads = 1024; const dim3 blocks((height * width + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(relIndx.type(), "relToAbsIndex2d_forward_cuda", ([&] { hipLaunchKernelGGL(( relToAbsIndex2d_cuda_forward_kernel<scalar_t>), dim3(blocks), dim3(threads), 0, 0, relIndx.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(), init_spIndx.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(), absIndx.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(), height, width, Kh, Kw, Kh*Kw); })); return absIndx; }
322ec90de9eaff1ab58c7b199ccade03db96f273.cu
#include <torch/extension.h> #include <cuda.h> #include <cuda_runtime.h> #include <vector> namespace { template <typename scalar_t> __global__ void relToAbsIndex2d_cuda_forward_kernel( const torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> relIndx, const torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> init_spIndx, torch::PackedTensorAccessor<scalar_t,4,torch::RestrictPtrTraits,size_t> absIndx, int height, int width, int Kh, int Kw, int K) { // indexing const int n = blockIdx.y; const int d = blockIdx.x * blockDim.x + threadIdx.x; const int h = d / width; const int w = d % width; if (h < height) { // Convert spix_idx based on the rel_idx const int rel_idx = static_cast<int>(relIndx[n][0][h][w]); const int rel_idx_h = rel_idx / 3 - 1; int rel_idx_w = rel_idx % 3 - 1; const int init_spix_idx = static_cast<int>(init_spIndx[n][0][h][w]); int spix_idx_h = init_spix_idx + rel_idx_h * Kw; if (spix_idx_h >= K || spix_idx_h <= -1) { spix_idx_h = init_spix_idx; } if (((spix_idx_h + 1) % Kw) == 0 && rel_idx_w == 1) { rel_idx_w = 0; } else if ((spix_idx_h % Kw) == 0 && rel_idx_w == -1) { rel_idx_w = 0; } int spix_idx_w = spix_idx_h + rel_idx_w; if (spix_idx_w < K && spix_idx_w > -1) { absIndx[n][0][h][w] = static_cast<float>(spix_idx_w); } else { absIndx[n][0][h][w] = static_cast<float>(spix_idx_h); } } } } // namespace torch::Tensor relToAbsIndex2d_cuda_forward( const torch::Tensor relIndx, // B 1 H W const torch::Tensor init_spIndx, // B 1 H W const int Kh, const int Kw) { // setup const auto batch_size = relIndx.size(0); const auto height = relIndx.size(2); const auto width = relIndx.size(3); auto absIndx = torch::zeros_like(relIndx); // B 1 H W // launch kernel const int threads = 1024; const dim3 blocks((height * width + threads - 1) / threads, batch_size); AT_DISPATCH_FLOATING_TYPES(relIndx.type(), "relToAbsIndex2d_forward_cuda", ([&] { relToAbsIndex2d_cuda_forward_kernel<scalar_t><<<blocks, threads>>>( relIndx.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(), init_spIndx.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(), absIndx.packed_accessor<scalar_t,4,torch::RestrictPtrTraits,size_t>(), height, width, Kh, Kw, Kh*Kw); })); return absIndx; }
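The .cu file above defines only the CUDA launcher; the Python-facing binding lives elsewhere in that project and is not part of this record. As a rough, hypothetical sketch of how such a launcher is typically exposed from a PyTorch C++ extension (the module and function names below are placeholders, not the project's real ones; only the relToAbsIndex2d_cuda_forward signature is taken from the code above):

    // binding.cpp -- hypothetical glue for the launcher defined above
    #include <torch/extension.h>

    // Declaration copied from the .cu file
    torch::Tensor relToAbsIndex2d_cuda_forward(
        const torch::Tensor relIndx,      // B x 1 x H x W relative neighbor index in {0, ..., 8}
        const torch::Tensor init_spIndx,  // B x 1 x H x W initial superpixel indices
        const int Kh, const int Kw);      // superpixel grid dimensions

    PYBIND11_MODULE(TORCH_EXTENSION_NAME, m) {
        m.def("rel_to_abs_index2d_forward", &relToAbsIndex2d_cuda_forward,
              "relToAbsIndex2d forward (CUDA)");
    }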
cbb83f64856f2cc3e8ad8be53db1052ba8732aff.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include "mttkrp_gpu.h" #include <vector> inline hipError_t checkCuda(hipError_t result, int s){ if (result != hipSuccess) { fprintf(stderr, "CUDA Runtime Error at line %d: %s\n", s, hipGetErrorString(result)); assert(result == hipSuccess); } return result; } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar( const DTYPE *__restrict__ vals, const ITYPE *__restrict__ fbrLikeSlcInds, const ITYPE *__restrict__ dInds2, const ITYPE *__restrict__ fbrPtr0, const ITYPE *__restrict__ fbrPtr1, const ITYPE *__restrict__ fbrIdx1, ITYPE nFibers, DTYPE *__restrict__ dU0, const DTYPE *__restrict__ dU1, const DTYPE *__restrict__ dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int fbrPerWarp, int logOfFPW) { ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbr = (gId >> (5 + logOfWPC)) << logOfFPW; // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val; if(fbr < nFibers - 1){ tmp_val = 0; bool diffFiber = false; unsigned int idx0; for (int fr = 0; fr < fbrPerWarp && (fbr+fr) < (nFibers - 1); ++fr){ diffFiber = false; unsigned int idx1 = fbrIdx1[fbr+fr];// dInds1[fbrPtr1[fbr]]; idx0 = fbrLikeSlcInds[fbr+fr];//slc; tmp_val = 0; for(unsigned int x = fbrPtr1[fbr+fr] + workId; x < fbrPtr1[fbr+fr+1]; x+=warpPerSlice) { unsigned int idx2 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU2[idx2 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp += tmp_val * dU1[idx1 * R + r] ; //2PR } if(fbrLikeSlcInds[fbr+fr] != fbrLikeSlcInds[fbr+fr+1]) { diffFiber = true; for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp); //2PR } tmp = 0; } } if(!diffFiber) { for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp); } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D( const DTYPE *__restrict__ vals, const ITYPE *__restrict__ fbrLikeSlcInds, const ITYPE *__restrict__ dInds3, const ITYPE *__restrict__ fbrPtr0, const ITYPE *__restrict__ fbrPtr1, const ITYPE *__restrict__ fbrIdx1, const ITYPE *__restrict__ fbrPtr2, const ITYPE *__restrict__ fbrIdx2, ITYPE nFibers, DTYPE *__restrict__ dU0, const DTYPE *__restrict__ dU1, const DTYPE *__restrict__ dU2, const DTYPE *__restrict__ dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int fbrPerWarp, int logOfFPW) { ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = (gId >> (5 + logOfWPC)) << logOfFPW; // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val, tmp2= 0; if(fbrS < nFibers - 1){ tmp_val = 0; bool diffFiber = false; unsigned int idx0; for (int fr = 0; fr < fbrPerWarp && (fbrS+fr) < (nFibers - 1); ++fr){ diffFiber = false; unsigned int idx1 = fbrIdx1[fbrS+fr];// dInds1[fbrPtr1[fbr]]; idx0 = fbrLikeSlcInds[fbrS+fr];//slc; tmp = 0; for (int fbr = fbrPtr1[fbrS+fr] + workId; fbr < fbrPtr1[fbrS+fr+1]; fbr+=warpPerSlice){ ITYPE idx2 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; x++) { unsigned int idx3 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU3[idx3 * R + r]; //2MR } } 
for(unsigned int r=laneId; r<R; r+=32) { tmp += tmp_val * dU2[idx2 * R + r] ; } } for(unsigned int r=laneId; r<R; r+=32) { tmp2 += tmp * dU1[idx1 * R + r] ; } if(fbrLikeSlcInds[fbrS+fr] != fbrLikeSlcInds[fbrS+fr+1]) { diffFiber = true; for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } tmp2 = 0; } } if(!diffFiber) { for(unsigned int r=laneId; r<R; r+=32) atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D( const DTYPE *__restrict__ vals, const ITYPE *__restrict__ fbrLikeSlcInds, const ITYPE *__restrict__ dInds3, const ITYPE *__restrict__ fbrPtr0, const ITYPE *__restrict__ fbrPtr1, const ITYPE *__restrict__ fbrIdx1, const ITYPE *__restrict__ fbrPtr2, const ITYPE *__restrict__ fbrIdx2, ITYPE nFibers, DTYPE *__restrict__ dU0, const DTYPE *__restrict__ dU1, const DTYPE *__restrict__ dU2, const DTYPE *__restrict__ dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC) { ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val, tmp2 = 0; if(fbrS < nFibers - 1){ tmp = 0; unsigned int idx0 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; unsigned int idx3 = fbrLikeSlcInds[fbrS];//slc; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ unsigned int idx1 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx2 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU2[idx2 * R + r] ; //2MR } for(unsigned int r=laneId; r<R; r+=32) tmp += tmp_val * dU1[idx1 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) { tmp2 = tmp * dU3[idx3 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar( const DTYPE *__restrict__ vals, const ITYPE *__restrict__ fbrLikeSlcInds, const ITYPE *__restrict__ dInds2, const ITYPE *__restrict__ fbrPtr0, const ITYPE *__restrict__ fbrPtr1, const ITYPE *__restrict__ fbrIdx1, ITYPE nFibers, DTYPE *__restrict__ dU0, const DTYPE *__restrict__ dU1, const DTYPE *__restrict__ dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC) { ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbr = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val; if(fbr < nFibers - 1){ tmp_val = 0; unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; unsigned int idx2 = fbrLikeSlcInds[fbr];//slc; for(unsigned int x = fbrPtr1[fbr] + workId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) { unsigned int idx1 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp = tmp_val * dU2[idx2 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp); //2PR } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D( const DTYPE *__restrict__ vals, const ITYPE *__restrict__ fbrLikeSlcInds, const ITYPE *__restrict__ dInds3, const ITYPE *__restrict__ fbrPtr0, const ITYPE *__restrict__ fbrPtr1, const ITYPE *__restrict__ fbrIdx1, const ITYPE *__restrict__ 
fbrPtr2, const ITYPE *__restrict__ fbrIdx2, ITYPE nFibers, DTYPE *__restrict__ dU0, const DTYPE *__restrict__ dU1, const DTYPE *__restrict__ dU2, const DTYPE *__restrict__ dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC) { ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp; if(fbrS < nFibers - 1){ unsigned int idx2 = fbrLikeSlcInds[fbrS];//slc; unsigned int idx3 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ unsigned int idx0 = fbrIdx2[fbr]; tmp = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx1 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp += vals[x] * dU1[idx1 * R + r]; //2MR } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp * dU2[idx2 * R + r] * dU3[idx3 * R + r]) ; } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar( const DTYPE *__restrict__ vals, const ITYPE *__restrict__ fbrLikeSlcInds, const ITYPE *__restrict__ dInds2, const ITYPE *__restrict__ fbrPtr0, const ITYPE *__restrict__ fbrPtr1, const ITYPE *__restrict__ fbrIdx1, ITYPE nFibers, DTYPE *__restrict__ dU0, const DTYPE *__restrict__ dU1, const DTYPE *__restrict__ dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC) { ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbr = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val; if(fbr < nFibers - 1){ tmp_val = 0; unsigned int idx1 = fbrLikeSlcInds[fbr];//slc; unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) tmp = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for(unsigned int x = fbrPtr1[fbr] + workId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) { unsigned int idx0 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val = vals[x] * tmp;///dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //2MR atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D( const DTYPE *__restrict__ vals, const ITYPE *__restrict__ fbrLikeSlcInds, const ITYPE *__restrict__ dInds3, const ITYPE *__restrict__ fbrPtr0, const ITYPE *__restrict__ fbrPtr1, const ITYPE *__restrict__ fbrIdx1, const ITYPE *__restrict__ fbrPtr2, const ITYPE *__restrict__ fbrIdx2, ITYPE nFibers, DTYPE *__restrict__ dU0, const DTYPE *__restrict__ dU1, const DTYPE *__restrict__ dU2, const DTYPE *__restrict__ dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC) { ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val = 0;; if(fbrS < nFibers - 1){ tmp = 0; unsigned int idx1 = fbrLikeSlcInds[fbrS];//slc; unsigned int idx2 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ 
ITYPE idx3 = fbrIdx2[fbr]; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx0 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp = vals[x] * dU3[idx3 * R + r] * tmp_val;//2MR atomicAdd(&dU0[idx0 * R + r], tmp); } } } } } int MTTKRP_MIHCSR_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt){ ITYPE *dInds2, *dInds3, *dFbrPtr0, *dFbrIdx0, *dfbrPtr1, *dFbrIdx1, *dFbrPtr2, *dFbrIdx2, *dFbrLikeSlcInds; DTYPE *dVals; ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dFbrLoc2 =0; ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0; // All m same mode ITYPE mode0 = 0;//TiledX[0].modeOrder[0]; ITYPE mode1 = 1;//TiledX[0].modeOrder[1]; ITYPE mode2 = 2;//TiledX[0].modeOrder[2]; ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ; for (int m = 0; m < TiledX[0].ndims; ++m){ if (TiledX[m].totNnz == 0) continue; totNnz += TiledX[m].totNnz; totSlcPtr += TiledX[m].fbrPtr[0].size() ; totSlcIdx += TiledX[m].fbrIdx[0].size() ; totFbrPtr += TiledX[m].fbrPtr[1].size() ; totFbrIdx += TiledX[m].fbrIdx[1].size() ; totFbrPtr2 += ((TiledX[m].ndims == 4) ? TiledX[m].fbrPtr[2].size() : 0) ; } // Allocate Tensor on a device checkCuda(hipMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), __LINE__); checkCuda(hipMalloc((void**) &dFbrPtr0, totSlcPtr * sizeof(ITYPE)), __LINE__); checkCuda(hipMalloc((void**) &dFbrIdx0, totSlcIdx * sizeof(ITYPE)), __LINE__); checkCuda(hipMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), __LINE__); checkCuda(hipMalloc((void**) &dFbrIdx1, totFbrIdx * sizeof(ITYPE)), __LINE__); checkCuda(hipMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), __LINE__); if(TiledX[0].ndims == 3) checkCuda(hipMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), __LINE__); if(TiledX[0].ndims == 4){ checkCuda(hipMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), __LINE__); checkCuda(hipMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), __LINE__); checkCuda(hipMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), __LINE__); } // device memory copy for tiled parts for (int m = 0; m < TiledX[0].ndims; ++m){ if(m > 0) { if (TiledX[m-1].totNnz > 0) { dLoc += TiledX[m-1].totNnz; dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); // all m same dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size(); dFbrLoc += TiledX[m - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[m].ndims == 4) ? 
TiledX[m - 1].fbrPtr[2].size() : 0) ; } } if (TiledX[m].totNnz == 0) continue; checkCuda(hipMemcpyAsync(dVals + dLoc, &(TiledX[m].vals[0]), TiledX[m].totNnz * sizeof(DTYPE),hipMemcpyHostToDevice, 0), __LINE__); checkCuda(hipMemcpyAsync(dFbrPtr0 + dSlcLoc, &(TiledX[m].fbrPtr[0][0]), TiledX[m].fbrPtr[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice, 0), __LINE__); checkCuda(hipMemcpyAsync(dFbrIdx0 + dSlcIdxLoc, &(TiledX[m].fbrIdx[0][0]), TiledX[m].fbrIdx[0].size() * sizeof(ITYPE),hipMemcpyHostToDevice, 0), __LINE__); checkCuda(hipMemcpyAsync(dfbrPtr1 + dFbrLoc, &(TiledX[m].fbrPtr[1][0]), TiledX[m].fbrPtr[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice, 0), __LINE__); checkCuda(hipMemcpyAsync(dFbrIdx1 + dFbrIdxLoc, &(TiledX[m].fbrIdx[1][0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice, 0), __LINE__); checkCuda(hipMemcpyAsync(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[m].fbrLikeSlcInds[0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),hipMemcpyHostToDevice, 0), __LINE__); if(TiledX[m].ndims == 3){ if(m <= 2) checkCuda(hipMemcpyAsync(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice, 0), __LINE__); } if(TiledX[m].ndims == 4){ checkCuda(hipMemcpyAsync(dFbrPtr2 + dFbrLoc2, &(TiledX[m].fbrPtr[2][0]), TiledX[m].fbrPtr[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice, 0), __LINE__); checkCuda(hipMemcpyAsync(dFbrIdx2 + dFbrLoc2, &(TiledX[m].fbrIdx[2][0]), TiledX[m].fbrIdx[2].size() * sizeof(ITYPE),hipMemcpyHostToDevice, 0), __LINE__); checkCuda(hipMemcpyAsync(dInds3 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[3]][0]), TiledX[m].totNnz * sizeof(ITYPE),hipMemcpyHostToDevice, 0), __LINE__); } } //Matrices unsigned int *dULoc = new unsigned int[TiledX[0].ndims]; unsigned int *szDU = new unsigned int[TiledX[0].ndims]; //Matrices DTYPE *dU; ITYPE mtxSize = ((TiledX[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols : (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols ); checkCuda(hipMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), __LINE__); for (int m = 0; m < TiledX[0].ndims; ++m) szDU[m] = U[m].nRows * U[m].nCols; hipMemset(dU, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(hipMemcpyAsync(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice, 0), __LINE__); checkCuda(hipMemcpyAsync(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice, 0), __LINE__); if(TiledX[0].ndims == 4) checkCuda(hipMemcpyAsync(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), hipMemcpyHostToDevice, 0), __LINE__); dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0; for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode){ if(MTTKRPmode > 0){ dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0; // MTTKRP on mode mode 0 changed DU0. To pass correctness for now initializing to 2 again. 
int mode = MTTKRPmode - 1; for(long r = 0; r < U[mode].nRows; ++r){ for(long c = 0; c < U[mode].nCols; ++c) U[mode].vals[r * U[mode].nCols + c] = mode + .5; } if(MTTKRPmode == 1){ checkCuda(hipMemcpyAsync(dU, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), hipMemcpyHostToDevice, 0), __LINE__); hipMemset(dU + szDU[0], 0, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)); } else if(MTTKRPmode == 2){ checkCuda(hipMemcpyAsync(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), hipMemcpyHostToDevice, 0), __LINE__); hipMemset(dU + szDU[0] + szDU[1], 0, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)); } else if(MTTKRPmode == 3){ checkCuda(hipMemcpyAsync(dU + szDU[0] + szDU[1] , &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), hipMemcpyHostToDevice, 0), __LINE__); hipMemset(dU + szDU[0] + szDU[1] + szDU[2], 0, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)); } } for (int m = 0; m < TiledX[0].ndims; ++m){ /* matrix order according to mode order*/ for (int mm = 0; mm < TiledX[0].ndims; ++mm){ int curMode = TiledX[m].modeOrder[mm]; dULoc[mm] = 0; for (int q = 0; q < curMode; ++q) dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0 } if(m > 0) { if (TiledX[m-1].totNnz > 0) { dLoc += TiledX[m-1].totNnz; dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size(); dFbrLoc += TiledX[m - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[m - 1].fbrPtr[2].size(): 0) ; } } if (TiledX[m].totNnz == 0) continue; int BLOCKSIZE; if(TiledX[m].modeOrder[0] == MTTKRPmode && TiledX[m].totNnz){ std::cout << "Run the implemention: Slc atomics\n" ; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//4; int logOfWarpPerFbr = log2(warpPerFbr); int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB int logOfFbrPerWarp = log2(fbrPerWarp ); if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){ std::cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!" << std::endl << "hint: increase -b!" << std::endl; return -1; } grid.x = ( warpPerFbr * 32 * ((TiledX[m].nFibers + fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dFbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); else if(TiledX[0].ndims == 4) { hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dFbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); } } else if(TiledX[0].ndims == 4 && TiledX[m].modeOrder[1] == MTTKRPmode && TiledX[m].totNnz){ std::cout << "Run the implemention: FbrS atomics\n"; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice; // default 4 if(warpPerFbr > (BLOCKSIZE/32)){ std::cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" 
<< std::endl; return -1; } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dFbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } else if(TiledX[m].modeOrder[TiledX[0].ndims-2] == MTTKRPmode && TiledX[m].totNnz){ std::cout << "Run the implemention: Fbr atomics\n"; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice; // default 4 if(warpPerFbr > (BLOCKSIZE/32)){ std::cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << std::endl; return -1; } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dFbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) { hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dFbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } } else if(TiledX[m].modeOrder[TiledX[0].ndims-1] == MTTKRPmode && TiledX[m].totNnz){ std::cout << "Run the implemention: nnz atomics\n"; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice; // default 4 if(warpPerFbr > (BLOCKSIZE/32)){ std::cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << std::endl; return -1; } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; if (TiledX[0].ndims == 3) hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dFbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) { hipLaunchKernelGGL(( mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D), dim3(grid), dim3(block), 0, 0, dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dFbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } } hipDeviceSynchronize(); } } /* Copying output matrix from GPU to CPU for correctness check */ int MTTKRPmode = TiledX[0].ndims - 1; ITYPE loc = ((TiledX[0].ndims == 3) ? 
szDU[0] + szDU[1] : szDU[0] + szDU[1] + szDU[2]); checkCuda(hipMemcpy(&U[MTTKRPmode].vals[0], dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), hipMemcpyDeviceToHost), __LINE__); hipFree(dVals); hipFree(dU); hipFree(dFbrIdx0); hipFree(dFbrIdx1); hipFree(dFbrPtr0); hipFree(dfbrPtr1); hipFree(dFbrLikeSlcInds); if(TiledX[0].ndims == 3) hipFree(dInds2); if(TiledX[0].ndims == 4){ hipFree(dFbrIdx2); hipFree(dFbrPtr2); hipFree(dInds3); } delete[] dULoc; delete[] szDU; int totalMIslics = 0, totalMISfibers = 0, totalMIfibers = 0, totalMInnz = 0;; for (int m = 0; m < TiledX[0].ndims; ++m){ if(TiledX[m].totNnz){ if(TiledX[m].ndims == 3){ totalMIslics += TiledX[m].fbrIdx[0].size(); totalMIfibers += TiledX[m].fbrPtr[1].size(); totalMInnz += TiledX[m].totNnz; } if(TiledX[m].ndims == 4){ totalMIslics += TiledX[m].fbrIdx[0].size(); totalMISfibers += TiledX[m].fbrPtr[1].size(); totalMIfibers += TiledX[m].fbrPtr[2].size(); totalMInnz += TiledX[m].totNnz; } } } std::cout << "Resource usage: " << std::endl; if(TiledX[0].ndims == 3) std::cout << " nSlc:" << totalMIslics << ", nFibers:" << totalMIfibers << ", nnz:" << totalMInnz << std::endl; else if(TiledX[0].ndims == 4) std::cout << " nSlc:" << totalMIslics << ", nSFibers:" << totalMISfibers << ", nFibers:" << totalMIfibers << ", nnz:" << totalMInnz << std::endl; return 0; }
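// Editorial sketch, not part of the original hipified file: every MIHCSR kernel above
// spreads the rank dimension R across the 32 lanes of a warp and resolves write
// conflicts on the output factor matrix with atomicAdd. The helper below is a minimal,
// hypothetical restatement of that shared pattern; accumulateRankRow is an assumed
// name and is not called anywhere in this project.
__device__ inline void accumulateRankRow(DTYPE *__restrict__ dU0,       // output factor matrix, row-major
                                         const DTYPE *__restrict__ dU1, // one input factor matrix
                                         DTYPE scale,                   // nonzero value times any cached partial products
                                         unsigned int outRow,           // row of dU0 owned by this slice/fiber
                                         unsigned int inRow,            // row of dU1 selected by the nonzero index
                                         ITYPE R, ITYPE laneId)
{
    // Lane r handles columns r, r+32, r+64, ... so one warp covers a full row of R columns.
    for (unsigned int r = laneId; r < R; r += 32)
        atomicAdd(&dU0[outRow * R + r], scale * dU1[inRow * R + r]);
}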
cbb83f64856f2cc3e8ad8be53db1052ba8732aff.cu
#include <iostream> #include "mttkrp_gpu.h" #include <vector> inline cudaError_t checkCuda(cudaError_t result, int s){ if (result != cudaSuccess) { fprintf(stderr, "CUDA Runtime Error at line %d: %s\n", s, cudaGetErrorString(result)); assert(result == cudaSuccess); } return result; } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar( const DTYPE *__restrict__ vals, const ITYPE *__restrict__ fbrLikeSlcInds, const ITYPE *__restrict__ dInds2, const ITYPE *__restrict__ fbrPtr0, const ITYPE *__restrict__ fbrPtr1, const ITYPE *__restrict__ fbrIdx1, ITYPE nFibers, DTYPE *__restrict__ dU0, const DTYPE *__restrict__ dU1, const DTYPE *__restrict__ dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int fbrPerWarp, int logOfFPW) { ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbr = (gId >> (5 + logOfWPC)) << logOfFPW; // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val; if(fbr < nFibers - 1){ tmp_val = 0; bool diffFiber = false; unsigned int idx0; for (int fr = 0; fr < fbrPerWarp && (fbr+fr) < (nFibers - 1); ++fr){ diffFiber = false; unsigned int idx1 = fbrIdx1[fbr+fr];// dInds1[fbrPtr1[fbr]]; idx0 = fbrLikeSlcInds[fbr+fr];//slc; tmp_val = 0; for(unsigned int x = fbrPtr1[fbr+fr] + workId; x < fbrPtr1[fbr+fr+1]; x+=warpPerSlice) { unsigned int idx2 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU2[idx2 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp += tmp_val * dU1[idx1 * R + r] ; //2PR } if(fbrLikeSlcInds[fbr+fr] != fbrLikeSlcInds[fbr+fr+1]) { diffFiber = true; for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp); //2PR } tmp = 0; } } if(!diffFiber) { for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp); } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D( const DTYPE *__restrict__ vals, const ITYPE *__restrict__ fbrLikeSlcInds, const ITYPE *__restrict__ dInds3, const ITYPE *__restrict__ fbrPtr0, const ITYPE *__restrict__ fbrPtr1, const ITYPE *__restrict__ fbrIdx1, const ITYPE *__restrict__ fbrPtr2, const ITYPE *__restrict__ fbrIdx2, ITYPE nFibers, DTYPE *__restrict__ dU0, const DTYPE *__restrict__ dU1, const DTYPE *__restrict__ dU2, const DTYPE *__restrict__ dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC, int fbrPerWarp, int logOfFPW) { ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = (gId >> (5 + logOfWPC)) << logOfFPW; // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val, tmp2= 0; if(fbrS < nFibers - 1){ tmp_val = 0; bool diffFiber = false; unsigned int idx0; for (int fr = 0; fr < fbrPerWarp && (fbrS+fr) < (nFibers - 1); ++fr){ diffFiber = false; unsigned int idx1 = fbrIdx1[fbrS+fr];// dInds1[fbrPtr1[fbr]]; idx0 = fbrLikeSlcInds[fbrS+fr];//slc; tmp = 0; for (int fbr = fbrPtr1[fbrS+fr] + workId; fbr < fbrPtr1[fbrS+fr+1]; fbr+=warpPerSlice){ ITYPE idx2 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; x++) { unsigned int idx3 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU3[idx3 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp += tmp_val * dU2[idx2 * R + r] ; } } 
for(unsigned int r=laneId; r<R; r+=32) { tmp2 += tmp * dU1[idx1 * R + r] ; } if(fbrLikeSlcInds[fbrS+fr] != fbrLikeSlcInds[fbrS+fr+1]) { diffFiber = true; for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } tmp2 = 0; } } if(!diffFiber) { for(unsigned int r=laneId; r<R; r+=32) atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D( const DTYPE *__restrict__ vals, const ITYPE *__restrict__ fbrLikeSlcInds, const ITYPE *__restrict__ dInds3, const ITYPE *__restrict__ fbrPtr0, const ITYPE *__restrict__ fbrPtr1, const ITYPE *__restrict__ fbrIdx1, const ITYPE *__restrict__ fbrPtr2, const ITYPE *__restrict__ fbrIdx2, ITYPE nFibers, DTYPE *__restrict__ dU0, const DTYPE *__restrict__ dU1, const DTYPE *__restrict__ dU2, const DTYPE *__restrict__ dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC) { ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val, tmp2 = 0; if(fbrS < nFibers - 1){ tmp = 0; unsigned int idx0 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; unsigned int idx3 = fbrLikeSlcInds[fbrS];//slc; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ unsigned int idx1 = fbrIdx2[fbr]; tmp_val = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx2 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp_val += vals[x] * dU2[idx2 * R + r] ; //2MR } for(unsigned int r=laneId; r<R; r+=32) tmp += tmp_val * dU1[idx1 * R + r] ; } for(unsigned int r=laneId; r<R; r+=32) { tmp2 = tmp * dU3[idx3 * R + r]; atomicAdd(&dU0[idx0 * R + r], tmp2); //2PR } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar( const DTYPE *__restrict__ vals, const ITYPE *__restrict__ fbrLikeSlcInds, const ITYPE *__restrict__ dInds2, const ITYPE *__restrict__ fbrPtr0, const ITYPE *__restrict__ fbrPtr1, const ITYPE *__restrict__ fbrIdx1, ITYPE nFibers, DTYPE *__restrict__ dU0, const DTYPE *__restrict__ dU1, const DTYPE *__restrict__ dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC) { ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbr = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val; if(fbr < nFibers - 1){ tmp_val = 0; unsigned int idx0 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; unsigned int idx2 = fbrLikeSlcInds[fbr];//slc; for(unsigned int x = fbrPtr1[fbr] + workId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) { unsigned int idx1 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val += vals[x] * dU1[idx1 * R + r]; //2MR } } for(unsigned int r=laneId; r<R; r+=32) { tmp = tmp_val * dU2[idx2 * R + r] ; atomicAdd(&dU0[idx0 * R + r], tmp); //2PR } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D( const DTYPE *__restrict__ vals, const ITYPE *__restrict__ fbrLikeSlcInds, const ITYPE *__restrict__ dInds3, const ITYPE *__restrict__ fbrPtr0, const ITYPE *__restrict__ fbrPtr1, const ITYPE *__restrict__ fbrIdx1, const ITYPE *__restrict__ fbrPtr2, const ITYPE *__restrict__ fbrIdx2, ITYPE nFibers, DTYPE *__restrict__ dU0, 
const DTYPE *__restrict__ dU1, const DTYPE *__restrict__ dU2, const DTYPE *__restrict__ dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC) { ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp; if(fbrS < nFibers - 1){ unsigned int idx2 = fbrLikeSlcInds[fbrS];//slc; unsigned int idx3 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ unsigned int idx0 = fbrIdx2[fbr]; tmp = 0; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { unsigned int idx1 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) tmp += vals[x] * dU1[idx1 * R + r]; //2MR } for(unsigned int r=laneId; r<R; r+=32) { atomicAdd(&dU0[idx0 * R + r], tmp * dU2[idx2 * R + r] * dU3[idx3 * R + r]) ; } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar( const DTYPE *__restrict__ vals, const ITYPE *__restrict__ fbrLikeSlcInds, const ITYPE *__restrict__ dInds2, const ITYPE *__restrict__ fbrPtr0, const ITYPE *__restrict__ fbrPtr1, const ITYPE *__restrict__ fbrIdx1, ITYPE nFibers, DTYPE *__restrict__ dU0, const DTYPE *__restrict__ dU1, const DTYPE *__restrict__ dU2, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC) { ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbr = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val; if(fbr < nFibers - 1){ tmp_val = 0; unsigned int idx1 = fbrLikeSlcInds[fbr];//slc; unsigned int idx2 = fbrIdx1[fbr];// dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) tmp = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for(unsigned int x = fbrPtr1[fbr] + workId; x < fbrPtr1[fbr+1]; x+=warpPerSlice) { unsigned int idx0 = dInds2[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp_val = vals[x] * tmp;///dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //2MR atomicAdd(&dU0[idx0 * R + r], tmp_val); } } } } // CUDA fbr atomic sing slcLikeFbr __global__ void mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D( const DTYPE *__restrict__ vals, const ITYPE *__restrict__ fbrLikeSlcInds, const ITYPE *__restrict__ dInds3, const ITYPE *__restrict__ fbrPtr0, const ITYPE *__restrict__ fbrPtr1, const ITYPE *__restrict__ fbrIdx1, const ITYPE *__restrict__ fbrPtr2, const ITYPE *__restrict__ fbrIdx2, ITYPE nFibers, DTYPE *__restrict__ dU0, const DTYPE *__restrict__ dU1, const DTYPE *__restrict__ dU2, const DTYPE *__restrict__ dU3, ITYPE mode, ITYPE R, ITYPE warpPerSlice, int logOfWPC) { ITYPE tId = threadIdx.x; ITYPE laneId = tId & 31; ITYPE bdim = blockDim.x; ITYPE gId = (blockIdx.x * bdim + tId); ITYPE workId = (tId & ((1 << (5 + logOfWPC)) - 1)) >> 5; //tId >> 5; //tId >> 5;// ITYPE fbrS = gId >> (5 + logOfWPC); // 5: minimum 1 WARP (2^5) // blockIdx.x ;// DTYPE tmp = 0, tmp_val = 0;; if(fbrS < nFibers - 1){ tmp = 0; unsigned int idx1 = fbrLikeSlcInds[fbrS];//slc; unsigned int idx2 = fbrIdx1[fbrS];// dInds1[fbrPtr1[fbr]]; for(unsigned int r=laneId; r<R; r+=32) tmp_val = dU1[idx1 * R + r] * dU2[idx2 * R + r] ; //1PR for (int fbr = fbrPtr1[fbrS] + workId; fbr < fbrPtr1[fbrS+1]; fbr+=warpPerSlice){ ITYPE idx3 = fbrIdx2[fbr]; for(unsigned int x = fbrPtr2[fbr]; x < fbrPtr2[fbr+1]; ++x) { 
unsigned int idx0 = dInds3[x]; for(unsigned int r=laneId; r<R; r+=32) { tmp = vals[x] * dU3[idx3 * R + r] * tmp_val;//2MR atomicAdd(&dU0[idx0 * R + r], tmp); } } } } } int MTTKRP_MIHCSR_GPU(TiledTensor *TiledX, Matrix *U, const Options &Opt){ ITYPE *dInds2, *dInds3, *dFbrPtr0, *dFbrIdx0, *dfbrPtr1, *dFbrIdx1, *dFbrPtr2, *dFbrIdx2, *dFbrLikeSlcInds; DTYPE *dVals; ITYPE dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0, dFbrLoc =0, dFbrIdxLoc =0, dFbrLoc2 =0; ITYPE totNnz = 0, totSlcPtr = 0, totSlcIdx = 0, totFbrPtr = 0, totFbrIdx = 0, totFbrPtr2 = 0; // All m same mode ITYPE mode0 = 0;//TiledX[0].modeOrder[0]; ITYPE mode1 = 1;//TiledX[0].modeOrder[1]; ITYPE mode2 = 2;//TiledX[0].modeOrder[2]; ITYPE mode3 = 3;//((TiledX[0].ndims == 4) ? TiledX[0].modeOrder[3] : 0) ; for (int m = 0; m < TiledX[0].ndims; ++m){ if (TiledX[m].totNnz == 0) continue; totNnz += TiledX[m].totNnz; totSlcPtr += TiledX[m].fbrPtr[0].size() ; totSlcIdx += TiledX[m].fbrIdx[0].size() ; totFbrPtr += TiledX[m].fbrPtr[1].size() ; totFbrIdx += TiledX[m].fbrIdx[1].size() ; totFbrPtr2 += ((TiledX[m].ndims == 4) ? TiledX[m].fbrPtr[2].size() : 0) ; } // Allocate Tensor on a device checkCuda(cudaMalloc((void**) &dVals, totNnz * sizeof(DTYPE)), __LINE__); checkCuda(cudaMalloc((void**) &dFbrPtr0, totSlcPtr * sizeof(ITYPE)), __LINE__); checkCuda(cudaMalloc((void**) &dFbrIdx0, totSlcIdx * sizeof(ITYPE)), __LINE__); checkCuda(cudaMalloc((void**) &dfbrPtr1, totFbrPtr * sizeof(ITYPE)), __LINE__); checkCuda(cudaMalloc((void**) &dFbrIdx1, totFbrIdx * sizeof(ITYPE)), __LINE__); checkCuda(cudaMalloc((void**) &dFbrLikeSlcInds, totFbrIdx * sizeof(ITYPE)), __LINE__); if(TiledX[0].ndims == 3) checkCuda(cudaMalloc((void**) &dInds2, totNnz * sizeof(ITYPE)), __LINE__); if(TiledX[0].ndims == 4){ checkCuda(cudaMalloc((void**) &dFbrIdx2, totFbrPtr2 * sizeof(ITYPE)), __LINE__); checkCuda(cudaMalloc((void**) &dFbrPtr2, totFbrPtr2 * sizeof(ITYPE)), __LINE__); checkCuda(cudaMalloc((void**) &dInds3, totNnz * sizeof(ITYPE)), __LINE__); } // device memory copy for tiled parts for (int m = 0; m < TiledX[0].ndims; ++m){ if(m > 0) { if (TiledX[m-1].totNnz > 0) { dLoc += TiledX[m-1].totNnz; dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); // all m same dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size(); dFbrLoc += TiledX[m - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[m].ndims == 4) ? 
TiledX[m - 1].fbrPtr[2].size() : 0) ; } } if (TiledX[m].totNnz == 0) continue; checkCuda(cudaMemcpyAsync(dVals + dLoc, &(TiledX[m].vals[0]), TiledX[m].totNnz * sizeof(DTYPE),cudaMemcpyHostToDevice, 0), __LINE__); checkCuda(cudaMemcpyAsync(dFbrPtr0 + dSlcLoc, &(TiledX[m].fbrPtr[0][0]), TiledX[m].fbrPtr[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice, 0), __LINE__); checkCuda(cudaMemcpyAsync(dFbrIdx0 + dSlcIdxLoc, &(TiledX[m].fbrIdx[0][0]), TiledX[m].fbrIdx[0].size() * sizeof(ITYPE),cudaMemcpyHostToDevice, 0), __LINE__); checkCuda(cudaMemcpyAsync(dfbrPtr1 + dFbrLoc, &(TiledX[m].fbrPtr[1][0]), TiledX[m].fbrPtr[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice, 0), __LINE__); checkCuda(cudaMemcpyAsync(dFbrIdx1 + dFbrIdxLoc, &(TiledX[m].fbrIdx[1][0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice, 0), __LINE__); checkCuda(cudaMemcpyAsync(dFbrLikeSlcInds + dFbrIdxLoc, &(TiledX[m].fbrLikeSlcInds[0]), TiledX[m].fbrIdx[1].size() * sizeof(ITYPE),cudaMemcpyHostToDevice, 0), __LINE__); if(TiledX[m].ndims == 3){ if(m <= 2) checkCuda(cudaMemcpyAsync(dInds2 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[2]][0]), TiledX[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice, 0), __LINE__); } if(TiledX[m].ndims == 4){ checkCuda(cudaMemcpyAsync(dFbrPtr2 + dFbrLoc2, &(TiledX[m].fbrPtr[2][0]), TiledX[m].fbrPtr[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice, 0), __LINE__); checkCuda(cudaMemcpyAsync(dFbrIdx2 + dFbrLoc2, &(TiledX[m].fbrIdx[2][0]), TiledX[m].fbrIdx[2].size() * sizeof(ITYPE),cudaMemcpyHostToDevice, 0), __LINE__); checkCuda(cudaMemcpyAsync(dInds3 + dLoc, &(TiledX[m].inds[TiledX[m].modeOrder[3]][0]), TiledX[m].totNnz * sizeof(ITYPE),cudaMemcpyHostToDevice, 0), __LINE__); } } //Matrices unsigned int *dULoc = new unsigned int[TiledX[0].ndims]; unsigned int *szDU = new unsigned int[TiledX[0].ndims]; //Matrices DTYPE *dU; ITYPE mtxSize = ((TiledX[0].ndims == 3) ? (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows) * U[mode0].nCols : (U[mode0].nRows + U[mode1].nRows + U[mode2].nRows + U[mode3].nRows) * U[mode0].nCols ); checkCuda(cudaMalloc((void**) &dU, mtxSize * sizeof(DTYPE)), __LINE__); for (int m = 0; m < TiledX[0].ndims; ++m) szDU[m] = U[m].nRows * U[m].nCols; cudaMemset(dU, 0, U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE)); checkCuda(cudaMemcpyAsync(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice, 0), __LINE__); checkCuda(cudaMemcpyAsync(dU + szDU[0] + szDU[1], &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice, 0), __LINE__); if(TiledX[0].ndims == 4) checkCuda(cudaMemcpyAsync(dU + szDU[0] + szDU[1] + szDU[2], &(U[mode3].vals[0]), U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice, 0), __LINE__); dLoc = 0, dSlcLoc = 0, dSlcIdxLoc = 0; dFbrLoc =0, dFbrIdxLoc = 0, dFbrLoc2= 0; for (int MTTKRPmode = 0; MTTKRPmode < TiledX[0].ndims; ++MTTKRPmode){ if(MTTKRPmode > 0){ dLoc = 0; dSlcLoc = 0; dSlcIdxLoc = 0; dFbrLoc =0; dFbrIdxLoc = 0; dFbrLoc2= 0; // MTTKRP on mode mode 0 changed DU0. To pass correctness for now initializing to 2 again. 
int mode = MTTKRPmode - 1; for(long r = 0; r < U[mode].nRows; ++r){ for(long c = 0; c < U[mode].nCols; ++c) U[mode].vals[r * U[mode].nCols + c] = mode + .5; } if(MTTKRPmode == 1){ checkCuda(cudaMemcpyAsync(dU, &(U[mode0].vals[0]), U[mode0].nRows * U[mode0].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice, 0), __LINE__); cudaMemset(dU + szDU[0], 0, U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE)); } else if(MTTKRPmode == 2){ checkCuda(cudaMemcpyAsync(dU + szDU[0], &(U[mode1].vals[0]), U[mode1].nRows * U[mode1].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice, 0), __LINE__); cudaMemset(dU + szDU[0] + szDU[1], 0, U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE)); } else if(MTTKRPmode == 3){ checkCuda(cudaMemcpyAsync(dU + szDU[0] + szDU[1] , &(U[mode2].vals[0]), U[mode2].nRows * U[mode2].nCols * sizeof(DTYPE), cudaMemcpyHostToDevice, 0), __LINE__); cudaMemset(dU + szDU[0] + szDU[1] + szDU[2], 0, U[mode3].nRows * U[mode3].nCols * sizeof(DTYPE)); } } for (int m = 0; m < TiledX[0].ndims; ++m){ /* matrix order according to mode order*/ for (int mm = 0; mm < TiledX[0].ndims; ++mm){ int curMode = TiledX[m].modeOrder[mm]; dULoc[mm] = 0; for (int q = 0; q < curMode; ++q) dULoc[mm] += szDU[q % TiledX[0].ndims]; //1 2 3 0 } if(m > 0) { if (TiledX[m-1].totNnz > 0) { dLoc += TiledX[m-1].totNnz; dSlcLoc += TiledX[m - 1].fbrPtr[0].size(); dSlcIdxLoc += TiledX[m - 1].fbrIdx[0].size(); dFbrLoc += TiledX[m - 1].fbrPtr[1].size(); dFbrIdxLoc += TiledX[m - 1].fbrIdx[1].size(); dFbrLoc2 += ((TiledX[0].ndims == 4) ? TiledX[m - 1].fbrPtr[2].size(): 0) ; } } if (TiledX[m].totNnz == 0) continue; int BLOCKSIZE; if(TiledX[m].modeOrder[0] == MTTKRPmode && TiledX[m].totNnz){ std::cout << "Run the implemention: Slc atomics\n" ; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice;//4; int logOfWarpPerFbr = log2(warpPerFbr); int fbrPerWarp = Opt.fiberPerWarp;//1;//BLOCKSIZE/32; // dont overflow TB int logOfFbrPerWarp = log2(fbrPerWarp ); if( (warpPerFbr > (BLOCKSIZE/32)) || (fbrPerWarp > (BLOCKSIZE/32)) ){ std::cout << "warpPerFbr (-w) or fbrPerWarp (-s) cannot be higher than threadblock size!" << std::endl << "hint: increase -b!" << std::endl; return -1; } grid.x = ( warpPerFbr * 32 * ((TiledX[m].nFibers + fbrPerWarp-1)/fbrPerWarp) + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dFbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); else if(TiledX[0].ndims == 4) { mttkrp_MIHCSR_kernel_slc_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dFbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr, fbrPerWarp, logOfFbrPerWarp); } } else if(TiledX[0].ndims == 4 && TiledX[m].modeOrder[1] == MTTKRPmode && TiledX[m].totNnz){ std::cout << "Run the implemention: FbrS atomics\n"; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice; // default 4 if(warpPerFbr > (BLOCKSIZE/32)){ std::cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" 
<< std::endl; return -1; } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; mttkrp_MIHCSR_kernel_fbrS_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dFbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } else if(TiledX[m].modeOrder[TiledX[0].ndims-2] == MTTKRPmode && TiledX[m].totNnz){ std::cout << "Run the implemention: Fbr atomics\n"; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice; // default 4 if(warpPerFbr > (BLOCKSIZE/32)){ std::cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << std::endl; return -1; } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; if(TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dFbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[1], dU + dULoc[2], dU + dULoc[0], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) { mttkrp_MIHCSR_kernel_fbr_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dFbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } } else if(TiledX[m].modeOrder[TiledX[0].ndims-1] == MTTKRPmode && TiledX[m].totNnz){ std::cout << "Run the implemention: nnz atomics\n"; BLOCKSIZE = Opt.TBsize; dim3 block(BLOCKSIZE, 1, 1), grid(1, 1, 1); int warpPerFbr = Opt.warpPerSlice; // default 4 if(warpPerFbr > (BLOCKSIZE/32)){ std::cout << "warpPerFbr (-w) cannot be higher than threadblock size! hint: increase -b!" << std::endl; return -1; } int logOfWarpPerFbr = log2(warpPerFbr); grid.x = ( warpPerFbr * 32 * TiledX[m].nFibers + BLOCKSIZE - 1) / BLOCKSIZE; if (TiledX[0].ndims == 3) mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds2 + dLoc, dFbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, TiledX[m].nFibers, dU + dULoc[2], dU + dULoc[0], dU + dULoc[1], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); else if (TiledX[0].ndims == 4) { mttkrp_MIHCSR_kernel_all_atomic_fbrLvlPar_4D<<<grid, block, 0, 0>>>(dVals + dLoc, dFbrLikeSlcInds + dFbrIdxLoc, dInds3 + dLoc, dFbrPtr0 + dSlcLoc, dfbrPtr1 + dFbrLoc, dFbrIdx1 + dFbrIdxLoc, dFbrPtr2 + dFbrLoc2, dFbrIdx2 + dFbrLoc2, TiledX[m].nFibers, dU + dULoc[3], dU + dULoc[0], dU + dULoc[1], dU + dULoc[2], Opt.mode, Opt.R, warpPerFbr, logOfWarpPerFbr); } } cudaDeviceSynchronize(); } } /* Copying output matrix from GPU to CPU for correctness check */ int MTTKRPmode = TiledX[0].ndims - 1; ITYPE loc = ((TiledX[0].ndims == 3) ? 
szDU[0] + szDU[1] : szDU[0] + szDU[1] + szDU[2]); checkCuda(cudaMemcpy(&U[MTTKRPmode].vals[0], dU + loc, U[MTTKRPmode].nRows * U[MTTKRPmode].nCols * sizeof(DTYPE), cudaMemcpyDeviceToHost), __LINE__); cudaFree(dVals); cudaFree(dU); cudaFree(dFbrIdx0); cudaFree(dFbrIdx1); cudaFree(dFbrPtr0); cudaFree(dfbrPtr1); cudaFree(dFbrLikeSlcInds); if(TiledX[0].ndims == 3) cudaFree(dInds2); if(TiledX[0].ndims == 4){ cudaFree(dFbrIdx2); cudaFree(dFbrPtr2); cudaFree(dInds3); } delete[] dULoc; delete[] szDU; int totalMIslics = 0, totalMISfibers = 0, totalMIfibers = 0, totalMInnz = 0;; for (int m = 0; m < TiledX[0].ndims; ++m){ if(TiledX[m].totNnz){ if(TiledX[m].ndims == 3){ totalMIslics += TiledX[m].fbrIdx[0].size(); totalMIfibers += TiledX[m].fbrPtr[1].size(); totalMInnz += TiledX[m].totNnz; } if(TiledX[m].ndims == 4){ totalMIslics += TiledX[m].fbrIdx[0].size(); totalMISfibers += TiledX[m].fbrPtr[1].size(); totalMIfibers += TiledX[m].fbrPtr[2].size(); totalMInnz += TiledX[m].totNnz; } } } std::cout << "Resource usage: " << std::endl; if(TiledX[0].ndims == 3) std::cout << " nSlc:" << totalMIslics << ", nFibers:" << totalMIfibers << ", nnz:" << totalMInnz << std::endl; else if(TiledX[0].ndims == 4) std::cout << " nSlc:" << totalMIslics << ", nSFibers:" << totalMISfibers << ", nFibers:" << totalMIfibers << ", nnz:" << totalMInnz << std::endl; return 0; }
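// Editorial sketch, not part of the original file: the launch configurations above give
// each fiber warpPerFbr warps of 32 threads, and the slice-atomic variant additionally
// packs fbrPerWarp fibers into one warp, which is why its grid size divides nFibers by
// fbrPerWarp before rounding up to whole thread blocks. The helper below restates that
// arithmetic; gridSizeForFibers is an assumed name, not an API of this code base.
static inline unsigned int gridSizeForFibers(unsigned int nFibers, unsigned int warpPerFbr,
                                             unsigned int fbrPerWarp, unsigned int blockSize)
{
    // Warp-sized work units, rounded up so tail fibers still receive a full warp.
    unsigned long long workUnits = ((unsigned long long)nFibers + fbrPerWarp - 1) / fbrPerWarp;
    unsigned long long threads   = (unsigned long long)warpPerFbr * 32ULL * workUnits;
    return (unsigned int)((threads + blockSize - 1) / blockSize);   // number of thread blocks in grid.x
}
// Example: with nFibers = 10000, warpPerFbr = 4, fbrPerWarp = 1 and a 512-thread block,
// this yields (4 * 32 * 10000 + 511) / 512 = 2500 blocks, matching the expressions above.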
712e57918be4b1711ce8b84ebaf12f8dafa03e72.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "matrixMul_kernel.hip" #include "ScopeProfile.h" extern std::vector<StepProfile> gProfile; extern std::map<std::string, long> gProfileGroup; void runTest(int argc, char **argv, int M, int K, int N) { #if 1 printf("************\n M = %d N = %d K = %d.\n", M, N, K); gProfile.clear(); cublasStatus status; status = hipblasInit(); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf (stderr, "!!!! CUBLAS initialization error\n"); return; } /* thrust::host_vector<float> A(M*K); thrust::host_vector<float> B(K*N);*/ thrust::device_vector<float> A(M*K); thrust::device_vector<float> B(K*N); #if 1 thrust::sequence(A.begin(), A.end()); thrust::sequence(B.begin(), B.end()); #else for (int i = 0; i < M*K; i++) A[i] = rand()&1; for (int i = 0; i < N*K; i++) B[i] = rand()&1; #endif thrust::device_vector<float> C(M*N); thrust::device_vector<float> C_block = C; thrust::device_vector<float> C_warp = C; thrust::device_vector<float> C_subwarp = C; thrust::device_vector<float> C_thread = C; /* Clear last error */ hipblasGetError(); float alpha = 1.0f; float beta = 0.0f; float *d_A = thrust::raw_pointer_cast(&A[0]); float *d_B = thrust::raw_pointer_cast(&B[0]); float *d_C = thrust::raw_pointer_cast(&C[0]); float *d_C_block = thrust::raw_pointer_cast(&C_block[0]); float *d_C_warp = thrust::raw_pointer_cast(&C_warp[0]); float *d_C_thread = thrust::raw_pointer_cast(&C_thread[0]); long flops = M*N*K ; /* Performs operation using cublas */ { GPUScopeProfile cublas("cublas"); hipblasSgemm('n', 'n', M, N, K, alpha, d_A, M, d_B, K, beta, d_C, M); } status = hipblasGetError(); if (status != HIPBLAS_STATUS_SUCCESS) { fprintf (stderr, "!!!! kernel execution error.\n"); return; } // printf("matrix multiply cublas M %d N %d K %d gflops %f.\n", M, N, K, (M*N*K) / seconds * 10e-9); #if 0 { GPUScopeProfile mm_block("block"); // int warps_per_block = 8; dim3 blocks(M, N, 1); hipLaunchKernelGGL(( matrixMultiply_block), dim3(blocks), dim3(256), 0, 0, M, N, K, alpha, d_A, M, d_B, K, beta, d_C_block, N); // float seconds = elapsed.seconds_elapsed(); // printf("matrix multiply block per element M %d N %d K %d gflops %f.\n", M, N, K, (M*N*K) / seconds * 10e-9); } { GPUScopeProfile mm_warp("warp"); // int warps_per_block = 8; dim3 blocks(M, max((N-1)/WARPS_PER_BLOCK+1,1), 1); hipLaunchKernelGGL(( matrixMultiply_warp), dim3(blocks), dim3(BLOCK_SIZE), 0, 0, M, N, K, alpha, d_A, M, d_B, K, beta, d_C_warp, N); // float seconds = elapsed.seconds_elapsed(); // printf("matrix multiply warp per element M %d N %d K %d gflops %f.\n", M, N, K, (M*N*K) / seconds * 10e-9); } #endif { GPUScopeProfile mm_thread("thread"); // int warps_per_block = 8; dim3 blocks((M-1)/BLOCK_SIZE_SQROOT+1, (N-1)/BLOCK_SIZE_SQROOT+1, 1); dim3 threads(16, 16, 1); hipLaunchKernelGGL(( matrixMultiply_thread), dim3(blocks), dim3(threads), 0, 0, M, N, K, alpha, d_A, M, d_B, K, beta, d_C_thread, N); // float seconds = elapsed.seconds_elapsed(); // printf("matrix multiply warp per element M %d N %d K %d gflops %f.\n", M, N, K, (M*N*K) / seconds * 10e-9); } thrust::host_vector<float> h_C = C; thrust::host_vector<float> h_A = A; thrust::host_vector<float> h_B = B; thrust::host_vector<float> h_C_block = C_block; thrust::host_vector<float> h_C_warp = C_warp; thrust::host_vector<float> h_C_thread = C_thread; /* printf("%f %f %f %f %f %f.\n", h_A[0], h_A[1], h_A[2], h_A[3], h_A[4], h_A[5]); printf("%f %f %f %f %f %f.\n", h_B[0], h_B[1], h_B[2], h_B[3], h_B[4], h_B[5]); printf("%f %f %f %f %f 
%f.\n", h_C[0], h_C[1], h_C[2], h_C[3], h_C[4], h_C[5]); printf("%f %f %f %f %f %f.\n", h_C_block[0], h_C_block[1], h_C_block[2], h_C_block[3], h_C_block[4], h_C_block[5]); printf("%f %f %f %f %f %f.\n", h_C_warp[0], h_C_warp[1], h_C_warp[2], h_C_warp[3], h_C_warp[4], h_C_warp[5]);*/ // checking #if 0 { thrust::device_vector<float> residue(1); float *d_residue = thrust::raw_pointer_cast(&residue[0]); hipLaunchKernelGGL(( check_result), dim3(1), dim3(512), 0, 0, d_residue, d_C, d_C_block, N*M); thrust::host_vector<float> h_residue = residue; if (h_residue[0] > 0.01) printf("block diff !!!!!!!! %f.\n", h_residue[0]); } { thrust::device_vector<float> residue(1); float *d_residue = thrust::raw_pointer_cast(&residue[0]); hipLaunchKernelGGL(( check_result), dim3(1), dim3(512), 0, 0, d_residue, d_C, d_C_warp, N*M); thrust::host_vector<float> h_residue = residue; if (h_residue[0] > 0.01) printf("warp diff !!!!!!!! %f.\n", h_residue[0]); } { thrust::device_vector<float> residue(1); float *d_residue = thrust::raw_pointer_cast(&residue[0]); hipLaunchKernelGGL(( check_result), dim3(1), dim3(512), 0, 0, d_residue, d_C, d_C_thread, N*M); thrust::host_vector<float> h_residue = residue; if (h_residue[0] > 0.01) printf("thread diff !!!!!!! %f.\n", h_residue[0]); } #endif showProfileResult(gProfile); } #endif
712e57918be4b1711ce8b84ebaf12f8dafa03e72.cu
#include "matrixMul_kernel.cu" #include "ScopeProfile.h" extern std::vector<StepProfile> gProfile; extern std::map<std::string, long> gProfileGroup; void runTest(int argc, char **argv, int M, int K, int N) { #if 1 printf("************\n M = %d N = %d K = %d.\n", M, N, K); gProfile.clear(); cublasStatus status; status = cublasInit(); if (status != CUBLAS_STATUS_SUCCESS) { fprintf (stderr, "!!!! CUBLAS initialization error\n"); return; } /* thrust::host_vector<float> A(M*K); thrust::host_vector<float> B(K*N);*/ thrust::device_vector<float> A(M*K); thrust::device_vector<float> B(K*N); #if 1 thrust::sequence(A.begin(), A.end()); thrust::sequence(B.begin(), B.end()); #else for (int i = 0; i < M*K; i++) A[i] = rand()&1; for (int i = 0; i < N*K; i++) B[i] = rand()&1; #endif thrust::device_vector<float> C(M*N); thrust::device_vector<float> C_block = C; thrust::device_vector<float> C_warp = C; thrust::device_vector<float> C_subwarp = C; thrust::device_vector<float> C_thread = C; /* Clear last error */ cublasGetError(); float alpha = 1.0f; float beta = 0.0f; float *d_A = thrust::raw_pointer_cast(&A[0]); float *d_B = thrust::raw_pointer_cast(&B[0]); float *d_C = thrust::raw_pointer_cast(&C[0]); float *d_C_block = thrust::raw_pointer_cast(&C_block[0]); float *d_C_warp = thrust::raw_pointer_cast(&C_warp[0]); float *d_C_thread = thrust::raw_pointer_cast(&C_thread[0]); long flops = M*N*K ; /* Performs operation using cublas */ { GPUScopeProfile cublas("cublas"); cublasSgemm('n', 'n', M, N, K, alpha, d_A, M, d_B, K, beta, d_C, M); } status = cublasGetError(); if (status != CUBLAS_STATUS_SUCCESS) { fprintf (stderr, "!!!! kernel execution error.\n"); return; } // printf("matrix multiply cublas M %d N %d K %d gflops %f.\n", M, N, K, (M*N*K) / seconds * 10e-9); #if 0 { GPUScopeProfile mm_block("block"); // int warps_per_block = 8; dim3 blocks(M, N, 1); matrixMultiply_block<<<blocks, 256>>>(M, N, K, alpha, d_A, M, d_B, K, beta, d_C_block, N); // float seconds = elapsed.seconds_elapsed(); // printf("matrix multiply block per element M %d N %d K %d gflops %f.\n", M, N, K, (M*N*K) / seconds * 10e-9); } { GPUScopeProfile mm_warp("warp"); // int warps_per_block = 8; dim3 blocks(M, max((N-1)/WARPS_PER_BLOCK+1,1), 1); matrixMultiply_warp<<<blocks, BLOCK_SIZE>>>(M, N, K, alpha, d_A, M, d_B, K, beta, d_C_warp, N); // float seconds = elapsed.seconds_elapsed(); // printf("matrix multiply warp per element M %d N %d K %d gflops %f.\n", M, N, K, (M*N*K) / seconds * 10e-9); } #endif { GPUScopeProfile mm_thread("thread"); // int warps_per_block = 8; dim3 blocks((M-1)/BLOCK_SIZE_SQROOT+1, (N-1)/BLOCK_SIZE_SQROOT+1, 1); dim3 threads(16, 16, 1); matrixMultiply_thread<<<blocks, threads>>>(M, N, K, alpha, d_A, M, d_B, K, beta, d_C_thread, N); // float seconds = elapsed.seconds_elapsed(); // printf("matrix multiply warp per element M %d N %d K %d gflops %f.\n", M, N, K, (M*N*K) / seconds * 10e-9); } thrust::host_vector<float> h_C = C; thrust::host_vector<float> h_A = A; thrust::host_vector<float> h_B = B; thrust::host_vector<float> h_C_block = C_block; thrust::host_vector<float> h_C_warp = C_warp; thrust::host_vector<float> h_C_thread = C_thread; /* printf("%f %f %f %f %f %f.\n", h_A[0], h_A[1], h_A[2], h_A[3], h_A[4], h_A[5]); printf("%f %f %f %f %f %f.\n", h_B[0], h_B[1], h_B[2], h_B[3], h_B[4], h_B[5]); printf("%f %f %f %f %f %f.\n", h_C[0], h_C[1], h_C[2], h_C[3], h_C[4], h_C[5]); printf("%f %f %f %f %f %f.\n", h_C_block[0], h_C_block[1], h_C_block[2], h_C_block[3], h_C_block[4], h_C_block[5]); printf("%f %f %f %f %f %f.\n", 
h_C_warp[0], h_C_warp[1], h_C_warp[2], h_C_warp[3], h_C_warp[4], h_C_warp[5]);*/ // checking #if 0 { thrust::device_vector<float> residue(1); float *d_residue = thrust::raw_pointer_cast(&residue[0]); check_result<<<1, 512>>>(d_residue, d_C, d_C_block, N*M); thrust::host_vector<float> h_residue = residue; if (h_residue[0] > 0.01) printf("block diff !!!!!!!! %f.\n", h_residue[0]); } { thrust::device_vector<float> residue(1); float *d_residue = thrust::raw_pointer_cast(&residue[0]); check_result<<<1, 512>>>(d_residue, d_C, d_C_warp, N*M); thrust::host_vector<float> h_residue = residue; if (h_residue[0] > 0.01) printf("warp diff !!!!!!!! %f.\n", h_residue[0]); } { thrust::device_vector<float> residue(1); float *d_residue = thrust::raw_pointer_cast(&residue[0]); check_result<<<1, 512>>>(d_residue, d_C, d_C_thread, N*M); thrust::host_vector<float> h_residue = residue; if (h_residue[0] > 0.01) printf("thread diff !!!!!!! %f.\n", h_residue[0]); } #endif showProfileResult(gProfile); } #endif
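// Editorial sketch, not part of the original file: this .cu file and its .hip counterpart
// earlier in this collection differ mainly in how kernels are launched. A minimal example
// of that mapping, using a hypothetical kernel name scaleKernel:
__global__ void scaleKernel(float *data, float alpha, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;   // one thread per element
    if (i < n) data[i] *= alpha;
}
// CUDA source, the same triple-chevron form used for matrixMultiply_thread above:
//   scaleKernel<<<blocks, threads>>>(d_data, 2.0f, n);
// hipify output, as seen in the paired .hip entry:
//   hipLaunchKernelGGL(( scaleKernel), dim3(blocks), dim3(threads), 0, 0, d_data, 2.0f, n);
// The grid, block, dynamic shared-memory bytes and stream become explicit arguments,
// while the kernel argument list is appended unchanged.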
f18a5316bc95843987bfe1042594e12b916ede12.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_post_pre_advec_z; int xdim0_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int ydim0_advec_mom_kernel_post_pre_advec_z; int ydim0_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int xdim1_advec_mom_kernel_post_pre_advec_z; int xdim1_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int ydim1_advec_mom_kernel_post_pre_advec_z; int ydim1_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int xdim2_advec_mom_kernel_post_pre_advec_z; int xdim2_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int ydim2_advec_mom_kernel_post_pre_advec_z; int ydim2_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int xdim3_advec_mom_kernel_post_pre_advec_z; int xdim3_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int ydim3_advec_mom_kernel_post_pre_advec_z; int ydim3_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int xdim4_advec_mom_kernel_post_pre_advec_z; int xdim4_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int ydim4_advec_mom_kernel_post_pre_advec_z; int ydim4_advec_mom_kernel_post_pre_advec_z_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #define OPS_ACC0(x, y, z) \ (x + xdim0_advec_mom_kernel_post_pre_advec_z * (y) + \ xdim0_advec_mom_kernel_post_pre_advec_z * \ ydim0_advec_mom_kernel_post_pre_advec_z * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_advec_mom_kernel_post_pre_advec_z * (y) + \ xdim1_advec_mom_kernel_post_pre_advec_z * \ ydim1_advec_mom_kernel_post_pre_advec_z * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_advec_mom_kernel_post_pre_advec_z * (y) + \ xdim2_advec_mom_kernel_post_pre_advec_z * \ ydim2_advec_mom_kernel_post_pre_advec_z * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_advec_mom_kernel_post_pre_advec_z * (y) + \ xdim3_advec_mom_kernel_post_pre_advec_z * \ ydim3_advec_mom_kernel_post_pre_advec_z * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_advec_mom_kernel_post_pre_advec_z * (y) + \ xdim4_advec_mom_kernel_post_pre_advec_z * \ ydim4_advec_mom_kernel_post_pre_advec_z * (z)) // user function __device__ inline void advec_mom_kernel_post_pre_advec_z_gpu(double *node_mass_post, const double *post_vol, const double *density1, double *node_mass_pre, const double *node_flux) { node_mass_post[OPS_ACC0(0, 0, 0)] = 0.125 * (density1[OPS_ACC2(0, -1, 0)] * post_vol[OPS_ACC1(0, -1, 0)] + density1[OPS_ACC2(0, 0, 0)] * post_vol[OPS_ACC1(0, 0, 0)] + density1[OPS_ACC2(-1, -1, 0)] * post_vol[OPS_ACC1(-1, -1, 0)] + density1[OPS_ACC2(-1, 0, 0)] * post_vol[OPS_ACC1(-1, 0, 0)] + density1[OPS_ACC2(0, -1, -1)] * post_vol[OPS_ACC1(0, -1, -1)] + density1[OPS_ACC2(0, 0, -1)] * post_vol[OPS_ACC1(0, 0, -1)] + density1[OPS_ACC2(-1, -1, -1)] * post_vol[OPS_ACC1(-1, -1, -1)] + density1[OPS_ACC2(-1, 0, -1)] * post_vol[OPS_ACC1(-1, 0, -1)]); node_mass_pre[OPS_ACC3(0, 0, 0)] = node_mass_post[OPS_ACC0(0, 0, 0)] - node_flux[OPS_ACC4(0, 0, -1)] + node_flux[OPS_ACC4(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 __global__ void ops_advec_mom_kernel_post_pre_advec_z( double *__restrict arg0, const double *__restrict arg1, const double *__restrict arg2, double *__restrict arg3, const double *__restrict arg4, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 
1 * 1 * xdim0_advec_mom_kernel_post_pre_advec_z + idx_z * 1 * 1 * xdim0_advec_mom_kernel_post_pre_advec_z * ydim0_advec_mom_kernel_post_pre_advec_z; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel_post_pre_advec_z + idx_z * 1 * 1 * xdim1_advec_mom_kernel_post_pre_advec_z * ydim1_advec_mom_kernel_post_pre_advec_z; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel_post_pre_advec_z + idx_z * 1 * 1 * xdim2_advec_mom_kernel_post_pre_advec_z * ydim2_advec_mom_kernel_post_pre_advec_z; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel_post_pre_advec_z + idx_z * 1 * 1 * xdim3_advec_mom_kernel_post_pre_advec_z * ydim3_advec_mom_kernel_post_pre_advec_z; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_mom_kernel_post_pre_advec_z + idx_z * 1 * 1 * xdim4_advec_mom_kernel_post_pre_advec_z * ydim4_advec_mom_kernel_post_pre_advec_z; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel_post_pre_advec_z_gpu(arg0, arg1, arg2, arg3, arg4); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_post_pre_advec_z( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_advec_mom_kernel_post_pre_advec_z_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif // Timing double t1, t2, c1, c2; ops_arg args[5] = {arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 5, range, 136)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(136, "advec_mom_kernel_post_pre_advec_z"); OPS_kernels[136].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel_post_pre_advec_z_h || ydim0 != ydim0_advec_mom_kernel_post_pre_advec_z_h || xdim1 != xdim1_advec_mom_kernel_post_pre_advec_z_h || ydim1 != ydim1_advec_mom_kernel_post_pre_advec_z_h || xdim2 != xdim2_advec_mom_kernel_post_pre_advec_z_h || ydim2 != ydim2_advec_mom_kernel_post_pre_advec_z_h || xdim3 != xdim3_advec_mom_kernel_post_pre_advec_z_h || ydim3 != 
ydim3_advec_mom_kernel_post_pre_advec_z_h || xdim4 != xdim4_advec_mom_kernel_post_pre_advec_z_h || ydim4 != ydim4_advec_mom_kernel_post_pre_advec_z_h) { hipMemcpyToSymbol(xdim0_advec_mom_kernel_post_pre_advec_z, &xdim0, sizeof(int)); xdim0_advec_mom_kernel_post_pre_advec_z_h = xdim0; hipMemcpyToSymbol(ydim0_advec_mom_kernel_post_pre_advec_z, &ydim0, sizeof(int)); ydim0_advec_mom_kernel_post_pre_advec_z_h = ydim0; hipMemcpyToSymbol(xdim1_advec_mom_kernel_post_pre_advec_z, &xdim1, sizeof(int)); xdim1_advec_mom_kernel_post_pre_advec_z_h = xdim1; hipMemcpyToSymbol(ydim1_advec_mom_kernel_post_pre_advec_z, &ydim1, sizeof(int)); ydim1_advec_mom_kernel_post_pre_advec_z_h = ydim1; hipMemcpyToSymbol(xdim2_advec_mom_kernel_post_pre_advec_z, &xdim2, sizeof(int)); xdim2_advec_mom_kernel_post_pre_advec_z_h = xdim2; hipMemcpyToSymbol(ydim2_advec_mom_kernel_post_pre_advec_z, &ydim2, sizeof(int)); ydim2_advec_mom_kernel_post_pre_advec_z_h = ydim2; hipMemcpyToSymbol(xdim3_advec_mom_kernel_post_pre_advec_z, &xdim3, sizeof(int)); xdim3_advec_mom_kernel_post_pre_advec_z_h = xdim3; hipMemcpyToSymbol(ydim3_advec_mom_kernel_post_pre_advec_z, &ydim3, sizeof(int)); ydim3_advec_mom_kernel_post_pre_advec_z_h = ydim3; hipMemcpyToSymbol(xdim4_advec_mom_kernel_post_pre_advec_z, &xdim4, sizeof(int)); xdim4_advec_mom_kernel_post_pre_advec_z_h = xdim4; hipMemcpyToSymbol(ydim4_advec_mom_kernel_post_pre_advec_z, &ydim4, sizeof(int)); ydim4_advec_mom_kernel_post_pre_advec_z_h = ydim4; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? 
args[4].dat->type_size : args[4].dat->elem_size); char *p_a[5]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args, 5, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[136].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_advec_mom_kernel_post_pre_advec_z), dim3(grid), dim3(tblock), 0, 0, (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], x_size, y_size, z_size); cutilSafeCall(hipGetLastError()); if (OPS_diags > 1) { cutilSafeCall(hipDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[136].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[3], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[136].mpi_time += t2 - t1; OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg4); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_post_pre_advec_z( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 136; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 136; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + 
desc->hash) + range[i]; } desc->nargs = 5; desc->args = (ops_arg *)malloc(5 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->function = ops_par_loop_advec_mom_kernel_post_pre_advec_z_execute; if (OPS_diags > 1) { ops_timing_realloc(136, "advec_mom_kernel_post_pre_advec_z"); } ops_enqueue_kernel(desc); } #endif
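Before the CUDA twin of this stub (next entry), note that the functional difference between the two generated host stubs is confined to the kernel launch site and the runtime-API prefixes. A minimal portability shim illustrating that pattern is sketched below; the macro name OPS_LAUNCH is hypothetical and OPS itself does not use such a shim, it simply emits the separate .cu and .hip files shown here.

#ifdef __HIPCC__
  #include <hip/hip_runtime.h>
  // HIP launch, as hipify rewrites it: explicit shared-memory-bytes and stream arguments.
  #define OPS_LAUNCH(kernel, grid, block, ...) \
      hipLaunchKernelGGL(kernel, dim3(grid), dim3(block), 0, 0, __VA_ARGS__)
#else
  #include <cuda_runtime.h>
  // CUDA launch, as in the .cu stub that follows.
  #define OPS_LAUNCH(kernel, grid, block, ...) \
      kernel<<<grid, block>>>(__VA_ARGS__)
#endif
// Usage equivalent to the launch in both stubs:
//   OPS_LAUNCH(ops_advec_mom_kernel_post_pre_advec_z, grid, tblock,
//              (double *)p_a[0], (double *)p_a[1], (double *)p_a[2],
//              (double *)p_a[3], (double *)p_a[4], x_size, y_size, z_size);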
f18a5316bc95843987bfe1042594e12b916ede12.cu
// // auto-generated by ops.py // __constant__ int xdim0_advec_mom_kernel_post_pre_advec_z; int xdim0_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int ydim0_advec_mom_kernel_post_pre_advec_z; int ydim0_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int xdim1_advec_mom_kernel_post_pre_advec_z; int xdim1_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int ydim1_advec_mom_kernel_post_pre_advec_z; int ydim1_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int xdim2_advec_mom_kernel_post_pre_advec_z; int xdim2_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int ydim2_advec_mom_kernel_post_pre_advec_z; int ydim2_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int xdim3_advec_mom_kernel_post_pre_advec_z; int xdim3_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int ydim3_advec_mom_kernel_post_pre_advec_z; int ydim3_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int xdim4_advec_mom_kernel_post_pre_advec_z; int xdim4_advec_mom_kernel_post_pre_advec_z_h = -1; __constant__ int ydim4_advec_mom_kernel_post_pre_advec_z; int ydim4_advec_mom_kernel_post_pre_advec_z_h = -1; #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 #define OPS_ACC0(x, y, z) \ (x + xdim0_advec_mom_kernel_post_pre_advec_z * (y) + \ xdim0_advec_mom_kernel_post_pre_advec_z * \ ydim0_advec_mom_kernel_post_pre_advec_z * (z)) #define OPS_ACC1(x, y, z) \ (x + xdim1_advec_mom_kernel_post_pre_advec_z * (y) + \ xdim1_advec_mom_kernel_post_pre_advec_z * \ ydim1_advec_mom_kernel_post_pre_advec_z * (z)) #define OPS_ACC2(x, y, z) \ (x + xdim2_advec_mom_kernel_post_pre_advec_z * (y) + \ xdim2_advec_mom_kernel_post_pre_advec_z * \ ydim2_advec_mom_kernel_post_pre_advec_z * (z)) #define OPS_ACC3(x, y, z) \ (x + xdim3_advec_mom_kernel_post_pre_advec_z * (y) + \ xdim3_advec_mom_kernel_post_pre_advec_z * \ ydim3_advec_mom_kernel_post_pre_advec_z * (z)) #define OPS_ACC4(x, y, z) \ (x + xdim4_advec_mom_kernel_post_pre_advec_z * (y) + \ xdim4_advec_mom_kernel_post_pre_advec_z * \ ydim4_advec_mom_kernel_post_pre_advec_z * (z)) // user function __device__ inline void advec_mom_kernel_post_pre_advec_z_gpu(double *node_mass_post, const double *post_vol, const double *density1, double *node_mass_pre, const double *node_flux) { node_mass_post[OPS_ACC0(0, 0, 0)] = 0.125 * (density1[OPS_ACC2(0, -1, 0)] * post_vol[OPS_ACC1(0, -1, 0)] + density1[OPS_ACC2(0, 0, 0)] * post_vol[OPS_ACC1(0, 0, 0)] + density1[OPS_ACC2(-1, -1, 0)] * post_vol[OPS_ACC1(-1, -1, 0)] + density1[OPS_ACC2(-1, 0, 0)] * post_vol[OPS_ACC1(-1, 0, 0)] + density1[OPS_ACC2(0, -1, -1)] * post_vol[OPS_ACC1(0, -1, -1)] + density1[OPS_ACC2(0, 0, -1)] * post_vol[OPS_ACC1(0, 0, -1)] + density1[OPS_ACC2(-1, -1, -1)] * post_vol[OPS_ACC1(-1, -1, -1)] + density1[OPS_ACC2(-1, 0, -1)] * post_vol[OPS_ACC1(-1, 0, -1)]); node_mass_pre[OPS_ACC3(0, 0, 0)] = node_mass_post[OPS_ACC0(0, 0, 0)] - node_flux[OPS_ACC4(0, 0, -1)] + node_flux[OPS_ACC4(0, 0, 0)]; } #undef OPS_ACC0 #undef OPS_ACC1 #undef OPS_ACC2 #undef OPS_ACC3 #undef OPS_ACC4 __global__ void ops_advec_mom_kernel_post_pre_advec_z( double *__restrict arg0, const double *__restrict arg1, const double *__restrict arg2, double *__restrict arg3, const double *__restrict arg4, int size0, int size1, int size2) { int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim0_advec_mom_kernel_post_pre_advec_z + idx_z * 1 * 1 * 
xdim0_advec_mom_kernel_post_pre_advec_z * ydim0_advec_mom_kernel_post_pre_advec_z; arg1 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim1_advec_mom_kernel_post_pre_advec_z + idx_z * 1 * 1 * xdim1_advec_mom_kernel_post_pre_advec_z * ydim1_advec_mom_kernel_post_pre_advec_z; arg2 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim2_advec_mom_kernel_post_pre_advec_z + idx_z * 1 * 1 * xdim2_advec_mom_kernel_post_pre_advec_z * ydim2_advec_mom_kernel_post_pre_advec_z; arg3 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim3_advec_mom_kernel_post_pre_advec_z + idx_z * 1 * 1 * xdim3_advec_mom_kernel_post_pre_advec_z * ydim3_advec_mom_kernel_post_pre_advec_z; arg4 += idx_x * 1 * 1 + idx_y * 1 * 1 * xdim4_advec_mom_kernel_post_pre_advec_z + idx_z * 1 * 1 * xdim4_advec_mom_kernel_post_pre_advec_z * ydim4_advec_mom_kernel_post_pre_advec_z; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { advec_mom_kernel_post_pre_advec_z_gpu(arg0, arg1, arg2, arg3, arg4); } } // host stub function #ifndef OPS_LAZY void ops_par_loop_advec_mom_kernel_post_pre_advec_z( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { #else void ops_par_loop_advec_mom_kernel_post_pre_advec_z_execute( ops_kernel_descriptor *desc) { int dim = desc->dim; int *range = desc->range; ops_arg arg0 = desc->args[0]; ops_arg arg1 = desc->args[1]; ops_arg arg2 = desc->args[2]; ops_arg arg3 = desc->args[3]; ops_arg arg4 = desc->args[4]; #endif // Timing double t1, t2, c1, c2; ops_arg args[5] = {arg0, arg1, arg2, arg3, arg4}; #if CHECKPOINTING && !OPS_LAZY if (!ops_checkpointing_before(args, 5, range, 136)) return; #endif if (OPS_diags > 1) { ops_timing_realloc(136, "advec_mom_kernel_post_pre_advec_z"); OPS_kernels[136].count++; ops_timers_core(&c1, &t1); } // compute locally allocated range for the sub-block int start[3]; int end[3]; #if OPS_MPI && !OPS_LAZY sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for (int n = 0; n < 3; n++) { start[n] = sb->decomp_disp[n]; end[n] = sb->decomp_disp[n] + sb->decomp_size[n]; if (start[n] >= range[2 * n]) { start[n] = 0; } else { start[n] = range[2 * n] - start[n]; } if (sb->id_m[n] == MPI_PROC_NULL && range[2 * n] < 0) start[n] = range[2 * n]; if (end[n] >= range[2 * n + 1]) { end[n] = range[2 * n + 1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n] == MPI_PROC_NULL && (range[2 * n + 1] > sb->decomp_disp[n] + sb->decomp_size[n])) end[n] += (range[2 * n + 1] - sb->decomp_disp[n] - sb->decomp_size[n]); } #else for (int n = 0; n < 3; n++) { start[n] = range[2 * n]; end[n] = range[2 * n + 1]; } #endif int x_size = MAX(0, end[0] - start[0]); int y_size = MAX(0, end[1] - start[1]); int z_size = MAX(0, end[2] - start[2]); int xdim0 = args[0].dat->size[0]; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]; int ydim1 = args[1].dat->size[1]; int xdim2 = args[2].dat->size[0]; int ydim2 = args[2].dat->size[1]; int xdim3 = args[3].dat->size[0]; int ydim3 = args[3].dat->size[1]; int xdim4 = args[4].dat->size[0]; int ydim4 = args[4].dat->size[1]; if (xdim0 != xdim0_advec_mom_kernel_post_pre_advec_z_h || ydim0 != ydim0_advec_mom_kernel_post_pre_advec_z_h || xdim1 != xdim1_advec_mom_kernel_post_pre_advec_z_h || ydim1 != ydim1_advec_mom_kernel_post_pre_advec_z_h || xdim2 != xdim2_advec_mom_kernel_post_pre_advec_z_h || ydim2 != ydim2_advec_mom_kernel_post_pre_advec_z_h || xdim3 != xdim3_advec_mom_kernel_post_pre_advec_z_h || ydim3 != ydim3_advec_mom_kernel_post_pre_advec_z_h || xdim4 != 
xdim4_advec_mom_kernel_post_pre_advec_z_h || ydim4 != ydim4_advec_mom_kernel_post_pre_advec_z_h) { cudaMemcpyToSymbol(xdim0_advec_mom_kernel_post_pre_advec_z, &xdim0, sizeof(int)); xdim0_advec_mom_kernel_post_pre_advec_z_h = xdim0; cudaMemcpyToSymbol(ydim0_advec_mom_kernel_post_pre_advec_z, &ydim0, sizeof(int)); ydim0_advec_mom_kernel_post_pre_advec_z_h = ydim0; cudaMemcpyToSymbol(xdim1_advec_mom_kernel_post_pre_advec_z, &xdim1, sizeof(int)); xdim1_advec_mom_kernel_post_pre_advec_z_h = xdim1; cudaMemcpyToSymbol(ydim1_advec_mom_kernel_post_pre_advec_z, &ydim1, sizeof(int)); ydim1_advec_mom_kernel_post_pre_advec_z_h = ydim1; cudaMemcpyToSymbol(xdim2_advec_mom_kernel_post_pre_advec_z, &xdim2, sizeof(int)); xdim2_advec_mom_kernel_post_pre_advec_z_h = xdim2; cudaMemcpyToSymbol(ydim2_advec_mom_kernel_post_pre_advec_z, &ydim2, sizeof(int)); ydim2_advec_mom_kernel_post_pre_advec_z_h = ydim2; cudaMemcpyToSymbol(xdim3_advec_mom_kernel_post_pre_advec_z, &xdim3, sizeof(int)); xdim3_advec_mom_kernel_post_pre_advec_z_h = xdim3; cudaMemcpyToSymbol(ydim3_advec_mom_kernel_post_pre_advec_z, &ydim3, sizeof(int)); ydim3_advec_mom_kernel_post_pre_advec_z_h = ydim3; cudaMemcpyToSymbol(xdim4_advec_mom_kernel_post_pre_advec_z, &xdim4, sizeof(int)); xdim4_advec_mom_kernel_post_pre_advec_z_h = xdim4; cudaMemcpyToSymbol(ydim4_advec_mom_kernel_post_pre_advec_z, &ydim4, sizeof(int)); ydim4_advec_mom_kernel_post_pre_advec_z_h = ydim4; } dim3 grid((x_size - 1) / OPS_block_size_x + 1, (y_size - 1) / OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x, OPS_block_size_y, 1); int dat0 = (OPS_soa ? args[0].dat->type_size : args[0].dat->elem_size); int dat1 = (OPS_soa ? args[1].dat->type_size : args[1].dat->elem_size); int dat2 = (OPS_soa ? args[2].dat->type_size : args[2].dat->elem_size); int dat3 = (OPS_soa ? args[3].dat->type_size : args[3].dat->elem_size); int dat4 = (OPS_soa ? 
args[4].dat->type_size : args[4].dat->elem_size); char *p_a[5]; // set up initial pointers int base0 = args[0].dat->base_offset + dat0 * 1 * (start[0] * args[0].stencil->stride[0]); base0 = base0 + dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1]); base0 = base0 + dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2]); p_a[0] = (char *)args[0].data_d + base0; int base1 = args[1].dat->base_offset + dat1 * 1 * (start[0] * args[1].stencil->stride[0]); base1 = base1 + dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1]); base1 = base1 + dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2]); p_a[1] = (char *)args[1].data_d + base1; int base2 = args[2].dat->base_offset + dat2 * 1 * (start[0] * args[2].stencil->stride[0]); base2 = base2 + dat2 * args[2].dat->size[0] * (start[1] * args[2].stencil->stride[1]); base2 = base2 + dat2 * args[2].dat->size[0] * args[2].dat->size[1] * (start[2] * args[2].stencil->stride[2]); p_a[2] = (char *)args[2].data_d + base2; int base3 = args[3].dat->base_offset + dat3 * 1 * (start[0] * args[3].stencil->stride[0]); base3 = base3 + dat3 * args[3].dat->size[0] * (start[1] * args[3].stencil->stride[1]); base3 = base3 + dat3 * args[3].dat->size[0] * args[3].dat->size[1] * (start[2] * args[3].stencil->stride[2]); p_a[3] = (char *)args[3].data_d + base3; int base4 = args[4].dat->base_offset + dat4 * 1 * (start[0] * args[4].stencil->stride[0]); base4 = base4 + dat4 * args[4].dat->size[0] * (start[1] * args[4].stencil->stride[1]); base4 = base4 + dat4 * args[4].dat->size[0] * args[4].dat->size[1] * (start[2] * args[4].stencil->stride[2]); p_a[4] = (char *)args[4].data_d + base4; #ifndef OPS_LAZY ops_H_D_exchanges_device(args, 5); ops_halo_exchanges(args, 5, range); #endif if (OPS_diags > 1) { ops_timers_core(&c2, &t2); OPS_kernels[136].mpi_time += t2 - t1; } // call kernel wrapper function, passing in pointers to data ops_advec_mom_kernel_post_pre_advec_z<<<grid, tblock>>>( (double *)p_a[0], (double *)p_a[1], (double *)p_a[2], (double *)p_a[3], (double *)p_a[4], x_size, y_size, z_size); cutilSafeCall(cudaGetLastError()); if (OPS_diags > 1) { cutilSafeCall(cudaDeviceSynchronize()); ops_timers_core(&c1, &t1); OPS_kernels[136].time += t1 - t2; } #ifndef OPS_LAZY ops_set_dirtybit_device(args, 5); ops_set_halo_dirtybit3(&args[0], range); ops_set_halo_dirtybit3(&args[3], range); #endif if (OPS_diags > 1) { // Update kernel record ops_timers_core(&c2, &t2); OPS_kernels[136].mpi_time += t2 - t1; OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg0); OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg1); OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg2); OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg3); OPS_kernels[136].transfer += ops_compute_transfer(dim, start, end, &arg4); } } #ifdef OPS_LAZY void ops_par_loop_advec_mom_kernel_post_pre_advec_z( char const *name, ops_block block, int dim, int *range, ops_arg arg0, ops_arg arg1, ops_arg arg2, ops_arg arg3, ops_arg arg4) { ops_kernel_descriptor *desc = (ops_kernel_descriptor *)malloc(sizeof(ops_kernel_descriptor)); desc->name = name; desc->block = block; desc->dim = dim; desc->device = 1; desc->index = 136; desc->hash = 5381; desc->hash = ((desc->hash << 5) + desc->hash) + 136; for (int i = 0; i < 6; i++) { desc->range[i] = range[i]; desc->orig_range[i] = range[i]; desc->hash = ((desc->hash << 5) + desc->hash) + range[i]; } 
desc->nargs = 5; desc->args = (ops_arg *)malloc(5 * sizeof(ops_arg)); desc->args[0] = arg0; desc->hash = ((desc->hash << 5) + desc->hash) + arg0.dat->index; desc->args[1] = arg1; desc->hash = ((desc->hash << 5) + desc->hash) + arg1.dat->index; desc->args[2] = arg2; desc->hash = ((desc->hash << 5) + desc->hash) + arg2.dat->index; desc->args[3] = arg3; desc->hash = ((desc->hash << 5) + desc->hash) + arg3.dat->index; desc->args[4] = arg4; desc->hash = ((desc->hash << 5) + desc->hash) + arg4.dat->index; desc->function = ops_par_loop_advec_mom_kernel_post_pre_advec_z_execute; if (OPS_diags > 1) { ops_timing_realloc(136, "advec_mom_kernel_post_pre_advec_z"); } ops_enqueue_kernel(desc); } #endif
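Both stubs compute the per-argument byte offsets base0 through base4 with the same padded row-major arithmetic before offsetting data_d. A condensed sketch of that arithmetic follows, assuming unit stencil strides (the generated code additionally multiplies each start[n] by args[i].stencil->stride[n]); the helper name is hypothetical and does not appear in the OPS output.

// Byte offset of element (sx, sy, sz) in a padded row-major 3D array of elem_bytes-sized elements.
static inline int ops_base_bytes(int base_offset, int elem_bytes,
                                 int size_x, int size_y,
                                 int sx, int sy, int sz)
{
    return base_offset + elem_bytes * (sx + size_x * (sy + size_y * sz));
}
// Equivalent to the generated code for argument 0:
//   p_a[0] = (char *)args[0].data_d
//            + ops_base_bytes(args[0].dat->base_offset, dat0,
//                             args[0].dat->size[0], args[0].dat->size[1],
//                             start[0], start[1], start[2]);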
a4521a2d5f6f0aa62d4686c9e9e8ec0ab537e4d7.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <string.h> #include <stdlib.h> #include "hist-equ.cu.h" extern "C"{ #include "hist-equ.h" } __device__ void print_id() { printf("by: %d, bx: %d, tx: %d, ty: %d\n", blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x); return; } __global__ void histogram_GPU(int * hist_out, unsigned char * img_in, int img_size, int nbr_bin) { int pixel; //if (((blockIdx.x / blockDim.x) + 1) * (blockDim.x * blockDim.x) < img_size) { //pixel = (blockIdx.x / blockDim.x) * (blockDim.x * blockDim.x) + (threadIdx.x * blockDim.x) + (blockIdx.x % blockDim.x); //} //else { pixel = blockIdx.x * blockDim.x + threadIdx.x; //} //pixel = threadIdx.x * img_size / MAX_THREAD_IN_BLOCK + blockIdx.x; /*if (blockIdx.x == 1024 && threadIdx.x == 0) { printf("pixel: %d\n", pixel); }*/ extern __shared__ int sh_hist_out[]; if (threadIdx.x < nbr_bin) { sh_hist_out[threadIdx.x] = 0; } __syncthreads(); if (pixel < img_size) { atomicAdd(&sh_hist_out[img_in[pixel]], 1); } __syncthreads(); if (threadIdx.x < nbr_bin) { atomicAdd(&hist_out[threadIdx.x], sh_hist_out[threadIdx.x]); } } __global__ void histogram_lut_GPU(int * hist_in, int * lut, int img_size, int nbr_bin) { int i, cdf, min, d; /* Construct the LUT by calculating the CDF */ cdf = 0; min = 0; i = 0; while (min == 0) { min = hist_in[i++]; } d = img_size - min; for (i = 0; i < nbr_bin; i ++) { cdf += hist_in[i]; //lut[i] = (cdf - min)*(nbr_bin - 1)/d; lut[i] = (int)(((float)cdf - min)*255/d + 0.5); if (lut[i] < 0) { lut[i] = 0; } else if (lut[i] > 255) { lut[i] = 255; } } } __global__ void histogram_equalization_GPU(unsigned char * img_out, unsigned char * img_in, int * lut, int img_size) { int pixel; pixel = blockIdx.x * blockDim.x + threadIdx.x; /* Get the result image */ if (pixel < img_size) { img_out[pixel] = (unsigned char)lut[img_in[pixel]]; } }
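The histogram kernel above zeroes and later flushes its shared-memory histogram using only the first nbr_bin threads of each block, so a launch needs blockDim.x >= nbr_bin and nbr_bin * sizeof(int) bytes of dynamic shared memory. The original host code is not part of this file; the wrapper below is a minimal HIP-side sketch under those assumptions, with d_hist and d_img as already-allocated device buffers.

#include <hip/hip_runtime.h>
// Hypothetical host-side launcher; not part of the converted file above.
static void run_histogram(int *d_hist, unsigned char *d_img, int img_size, int nbr_bin)
{
    int threads = 256;                                 // must be >= nbr_bin for the zero/flush phases
    int blocks  = (img_size + threads - 1) / threads;  // one pixel per thread
    hipMemset(d_hist, 0, nbr_bin * sizeof(int));       // kernel accumulates into this with atomicAdd
    hipLaunchKernelGGL(histogram_GPU, dim3(blocks), dim3(threads),
                       nbr_bin * sizeof(int), 0,       // dynamic shared-memory bytes, default stream
                       d_hist, d_img, img_size, nbr_bin);
    hipDeviceSynchronize();
}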
a4521a2d5f6f0aa62d4686c9e9e8ec0ab537e4d7.cu
#include <stdio.h> #include <string.h> #include <stdlib.h> #include "hist-equ.cu.h" extern "C"{ #include "hist-equ.h" } __device__ void print_id() { printf("by: %d, bx: %d, tx: %d, ty: %d\n", blockIdx.y, blockIdx.x, threadIdx.y, threadIdx.x); return; } __global__ void histogram_GPU(int * hist_out, unsigned char * img_in, int img_size, int nbr_bin) { int pixel; //if (((blockIdx.x / blockDim.x) + 1) * (blockDim.x * blockDim.x) < img_size) { //pixel = (blockIdx.x / blockDim.x) * (blockDim.x * blockDim.x) + (threadIdx.x * blockDim.x) + (blockIdx.x % blockDim.x); //} //else { pixel = blockIdx.x * blockDim.x + threadIdx.x; //} //pixel = threadIdx.x * img_size / MAX_THREAD_IN_BLOCK + blockIdx.x; /*if (blockIdx.x == 1024 && threadIdx.x == 0) { printf("pixel: %d\n", pixel); }*/ extern __shared__ int sh_hist_out[]; if (threadIdx.x < nbr_bin) { sh_hist_out[threadIdx.x] = 0; } __syncthreads(); if (pixel < img_size) { atomicAdd(&sh_hist_out[img_in[pixel]], 1); } __syncthreads(); if (threadIdx.x < nbr_bin) { atomicAdd(&hist_out[threadIdx.x], sh_hist_out[threadIdx.x]); } } __global__ void histogram_lut_GPU(int * hist_in, int * lut, int img_size, int nbr_bin) { int i, cdf, min, d; /* Construct the LUT by calculating the CDF */ cdf = 0; min = 0; i = 0; while (min == 0) { min = hist_in[i++]; } d = img_size - min; for (i = 0; i < nbr_bin; i ++) { cdf += hist_in[i]; //lut[i] = (cdf - min)*(nbr_bin - 1)/d; lut[i] = (int)(((float)cdf - min)*255/d + 0.5); if (lut[i] < 0) { lut[i] = 0; } else if (lut[i] > 255) { lut[i] = 255; } } } __global__ void histogram_equalization_GPU(unsigned char * img_out, unsigned char * img_in, int * lut, int img_size) { int pixel; pixel = blockIdx.x * blockDim.x + threadIdx.x; /* Get the result image */ if (pixel < img_size) { img_out[pixel] = (unsigned char)lut[img_in[pixel]]; } }
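The CUDA twin uses the same launch shape. histogram_lut_GPU is written as a single serial loop over the bins, so a one-thread launch is the natural fit; that choice, and the buffer names below, are assumptions since the original host code is not included in this file.

#include <cuda_runtime.h>
// Hypothetical end-to-end host wrapper; assumes the three kernels above are visible in this translation unit.
static void equalize(unsigned char *d_out, unsigned char *d_in,
                     int *d_hist, int *d_lut, int img_size, int nbr_bin)
{
    int threads = 256;                                             // >= nbr_bin (e.g. 256 grey levels)
    int blocks  = (img_size + threads - 1) / threads;
    cudaMemset(d_hist, 0, nbr_bin * sizeof(int));
    histogram_GPU<<<blocks, threads, nbr_bin * sizeof(int)>>>(d_hist, d_in, img_size, nbr_bin);
    histogram_lut_GPU<<<1, 1>>>(d_hist, d_lut, img_size, nbr_bin); // serial CDF -> LUT scan
    histogram_equalization_GPU<<<blocks, threads>>>(d_out, d_in, d_lut, img_size);
    cudaDeviceSynchronize();
}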
1ab820dee798fcdd907fb05943a2912bda0c9d14.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * University of Illinois Open Source License * Copyright 2008-2018 Luthey-Schulten Group, * Copyright 2012 Roberts Group, * All rights reserved. * * Developed by: Luthey-Schulten Group * University of Illinois at Urbana-Champaign * http://www.scs.uiuc.edu/~schulten * * Overflow algorithm in RDME solvers and CPU assignment (2012) * Developed by: Roberts Group * Johns Hopkins University * http://biophysics.jhu.edu/roberts/ * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the Software), to deal with * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to * do so, subject to the following conditions: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimers. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimers in the documentation * and/or other materials provided with the distribution. * * - Neither the names of the Luthey-Schulten Group, University of Illinois at * Urbana-Champaign, the Roberts Group, Johns Hopkins University, nor the names * of its contributors may be used to endorse or promote products derived from * this Software without specific prior written permission. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS WITH THE SOFTWARE. 
* * Author(s): Elijah Roberts, Zane Thornburg */ #include <map> #include <string> #include <cstdlib> #include "config.h" #if defined(MACOSX) #include <mach/mach_time.h> #elif defined(LINUX) #include <time.h> #endif #include "cuda/lm_cuda.h" #include "cuda/ldg.h" #include "core/Math.h" #include "core/Print.h" #include "cme/CMESolver.h" #include "DiffusionModel.pb.h" #include "Lattice.pb.h" #include "SpeciesCounts.pb.h" #include "core/DataOutputQueue.h" #include "core/ResourceAllocator.h" #include "rdme/ByteLattice.h" #include "rdme/CudaByteLattice.h" #include "rdme/MpdRdmeSolver.h" #include "rng/RandomGenerator.h" #include "lptf/Profile.h" #include <sstream> #include "core/Timer.h" #include "io/ArbitraryH5.h" #define MPD_WORDS_PER_SITE (MPD_LATTICE_MAX_OCCUPANCY / 4)// 4 #define MPD_APRON_SIZE 1 #include "cuda/constant.cuh" namespace lm { namespace rdme { namespace mpdrdme_dev { #include "rdme/dev/xor_random_dev.cu" #include "rdme/dev/lattice_sim_1d_dev.cu" #include "rdme/dev/byte_diffusion_1d_dev.cu" #include "rdme/dev/byte_reaction_dev.cu" }}} extern bool globalAbort; using std::map; using lm::io::DiffusionModel; using lm::rdme::Lattice; using lm::rng::RandomGenerator; namespace lm { namespace rdme { MpdRdmeSolver::MpdRdmeSolver() :RDMESolver(lm::rng::RandomGenerator::NONE),seed(0),cudaOverflowList(NULL),cudaStream(0),tau(0.0),overflowTimesteps(0),overflowListUses(0) { } void MpdRdmeSolver::initialize(unsigned int replicate, map<string,string> * parameters, ResourceAllocator::ComputeResources * resources) { RDMESolver::initialize(replicate, parameters, resources); // Figure out the random seed. uint32_t seedTop=(unsigned int)atoi((*parameters)["seed"].c_str()); if (seedTop == 0) { #if defined(MACOSX) seedTop = (uint32_t)mach_absolute_time(); #elif defined(LINUX) struct timespec seed_timespec; if (clock_gettime(CLOCK_REALTIME, &seed_timespec) != 0) throw lm::Exception("Error getting time to use for random seed."); seedTop = seed_timespec.tv_nsec; #endif } seed = (seedTop<<16)|(replicate&0x0000FFFF); Print::printf(Print::INFO, "MPDRDME: Rng seed: top word %u, bottom word %u", (seed>>16)*0xffff, seed&0xFFFF); string &s = (*parameters)["rdme.mpd.overflowhandler"]; if(s == "classic") { overflow_handling = OVERFLOW_MODE_CLASSIC; Print::printf(Print::DEBUG, "Overflow handler set to classic"); } else if(s == "relaxed") { overflow_handling = OVERFLOW_MODE_RELAXED; Print::printf(Print::DEBUG, "Overflow handler set to relaxed"); } else if(s == "") { overflow_handling = OVERFLOW_MODE_CLASSIC; Print::printf(Print::DEBUG, "Overflow handler set to default (classic)"); } else { overflow_handling = OVERFLOW_MODE_CLASSIC; Print::printf(Print::WARNING, "Unknown overflow handler requested: '%s'", s.c_str()); } #ifdef MPD_MAPPED_OVERFLOWS CUDA_EXCEPTION_CHECK(hipHostMalloc(&cudaOverflowList, MPD_OVERFLOW_LIST_SIZE, hipHostMallocPortable|hipHostMallocMapped)); memset(cudaOverflowList, 0, MPD_OVERFLOW_LIST_SIZE); #else // Allocate memory on the device for the exception list. CUDA_EXCEPTION_CHECK(hipMalloc(&cudaOverflowList, MPD_OVERFLOW_LIST_SIZE)); //TODO: track memory usage. CUDA_EXCEPTION_CHECK(hipMemset(cudaOverflowList, 0, MPD_OVERFLOW_LIST_SIZE)); #endif // Create a stream for synchronizing the events. CUDA_EXCEPTION_CHECK(hipStreamCreate(&cudaStream)); } MpdRdmeSolver::~MpdRdmeSolver() { // Free any device memory. if (cudaOverflowList != NULL) { #ifdef MPD_MAPPED_OVERFLOWS CUDA_EXCEPTION_CHECK_NOTHROW(hipHostFree(cudaOverflowList)); //TODO: track memory usage. 
#else CUDA_EXCEPTION_CHECK_NOTHROW(hipFree(cudaOverflowList)); //TODO: track memory usage. #endif cudaOverflowList = NULL; } // If we have created a stream, destroy it. if (cudaStream != 0) { CUDA_EXCEPTION_CHECK_NOTHROW(hipStreamDestroy(cudaStream)); cudaStream = NULL; } } void MpdRdmeSolver::allocateLattice(lattice_size_t latticeXSize, lattice_size_t latticeYSize, lattice_size_t latticeZSize, site_size_t particlesPerSite, const unsigned int bytes_per_particle, si_dist_t latticeSpacing) { assert(bytes_per_particle == 1); if(particlesPerSite != MPD_LATTICE_MAX_OCCUPANCY) { Print::printf(Print::ERROR, "requested allocation for %d particles per site is not %d", particlesPerSite, MPD_LATTICE_MAX_OCCUPANCY); throw Exception("incorrect particle density"); } if(latticeXSize % 32 != 0) { Print::printf(Print::ERROR, "The X dimension (%d) is not divisible by %d", latticeXSize, 32); throw Exception("incorrect lattice dimensions"); } if(latticeYSize % TUNE_MPD_Y_BLOCK_Y_SIZE != 0) { Print::printf(Print::ERROR, "The Y dimension (%d) is not divisible by %d", latticeYSize, TUNE_MPD_Y_BLOCK_Y_SIZE); throw Exception("incorrect lattice dimensions"); } if(latticeZSize % TUNE_MPD_Z_BLOCK_Z_SIZE != 0) { Print::printf(Print::ERROR, "The Z dimension (%d) is not divisible by %d", latticeZSize, TUNE_MPD_Z_BLOCK_Z_SIZE); throw Exception("incorrect lattice dimensions"); } try { CudaByteLattice *cbl = new CudaByteLattice(latticeXSize, latticeYSize, latticeZSize, latticeSpacing, particlesPerSite); lattice = (Lattice *)cbl; } catch (CUDAException e) { Print::printf(Print::ERROR, e.what()); throw Exception("Failed to create cuda lattice"); } } void MpdRdmeSolver::buildModel(const uint numberSpeciesA, const uint numberReactionsA, const uint * initialSpeciesCountsA, const uint * reactionTypesA, const double * KA, const int * SA, const uint * DA, const uint kCols) { CMESolver::buildModel(numberSpeciesA, numberReactionsA, initialSpeciesCountsA, reactionTypesA, KA, SA, DA, kCols); // Get the time step. tau=atof((*parameters)["timestep"].c_str()); if (tau <= 0.0) throw InvalidArgException("timestep", "A positive timestep must be specified for the solver."); // Make sure we can support the reaction model. if (numberReactions > MPD_MAX_REACTION_TABLE_ENTRIES) { std::stringstream s; s << "The number of reaction table entries (" << numberReactions <<") exceeds the maximum supported by the solver (" << MPD_MAX_REACTION_TABLE_ENTRIES << ")."; throw Exception(s.str().c_str()); } #ifndef MPD_GLOBAL_S_MATRIX if (numberSpecies*numberReactions > MPD_MAX_S_MATRIX_ENTRIES) { std::stringstream s; s << "The number of S matrix entries (" << numberSpecies*numberReactions <<") exceeds the maximum supported by the solver (" << MPD_MAX_S_MATRIX_ENTRIES << ")."; throw Exception(s.str().c_str()); } #endif // Setup the cuda reaction model. 
unsigned int * reactionOrders = new unsigned int[numberReactions]; unsigned int * reactionSites = new unsigned int[numberReactions]; unsigned int * D1 = new unsigned int[numberReactions]; unsigned int * D2 = new unsigned int[numberReactions]; for (uint i=0; i<numberReactions; i++) { if(reactionTypes[i] == ZerothOrderPropensityArgs::REACTION_TYPE) { reactionOrders[i] = MPD_ZERO_ORDER_REACTION; reactionSites[i] = 0; D1[i] = 0; D2[i] = 0; } else if (reactionTypes[i] == FirstOrderPropensityArgs::REACTION_TYPE) { reactionOrders[i] = MPD_FIRST_ORDER_REACTION; reactionSites[i] = 0; D1[i] = ((FirstOrderPropensityArgs *)propensityFunctionArgs[i])->si+1; D2[i] = 0; } else if (reactionTypes[i] == SecondOrderPropensityArgs::REACTION_TYPE) { reactionOrders[i] = MPD_SECOND_ORDER_REACTION; reactionSites[i] = 0; D1[i] = ((SecondOrderPropensityArgs *)propensityFunctionArgs[i])->s1i+1; D2[i] = ((SecondOrderPropensityArgs *)propensityFunctionArgs[i])->s2i+1; } else if (reactionTypes[i] == SecondOrderSelfPropensityArgs::REACTION_TYPE) { reactionOrders[i] = MPD_SECOND_ORDER_SELF_REACTION; reactionSites[i] = 0; D1[i] = ((SecondOrderSelfPropensityArgs *)propensityFunctionArgs[i])->si+1; D2[i] = 0; } else { throw InvalidArgException("reactionTypeA", "the reaction type was not supported by the solver", reactionTypes[i]); } } // Setup the cuda S matrix. // Transpose S on the device because we will want to read along the numSpecies axis // Before A1A2A3A4A5 B1B2B3B4B5 C1C2C3C4C5 ... // After A1B1C1 A2B2C2 ... /* int8_t * tmpS = new int8_t[numberSpecies*numberReactions]; for (uint i=0; i<numberSpecies*numberReactions; i++) { tmpS[i] = S[i]; } */ int8_t * tmpS = new int8_t[numberSpecies*numberReactions]; for(uint rx = 0; rx < numberReactions; rx++) { for (uint p=0; p<numberSpecies; p++) { //tmpS[numberSpecies*rx + p] = S[numberSpecies*rx + p]; tmpS[rx * numberSpecies + p] = S[numberReactions*p + rx]; } } #ifdef MPD_GLOBAL_R_MATRIX // R matrix put in global memory //hipMalloc(&numberReactionsG, sizeof(unsigned int)); //hipMemcpy(numberReactionsG, &numberReactions, sizeof(unsigned int), hipMemcpyHostToDevice); CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(numberReactionsC, &numberReactions, sizeof(unsigned int))); hipMalloc(&reactionOrdersG, numberReactions*sizeof(unsigned int)); hipMemcpy(reactionOrdersG, reactionOrders, numberReactions*sizeof(unsigned int), hipMemcpyHostToDevice); hipMalloc(&reactionSitesG, numberReactions*sizeof(unsigned int)); hipMemcpy(reactionSitesG, reactionSites, numberReactions*sizeof(unsigned int), hipMemcpyHostToDevice); hipMalloc(&D1G, numberReactions*sizeof(unsigned int)); hipMemcpy(D1G, D1, numberReactions*sizeof(unsigned int), hipMemcpyHostToDevice); hipMalloc(&D2G, numberReactions*sizeof(unsigned int)); hipMemcpy(D2G, D2, numberReactions*sizeof(unsigned int), hipMemcpyHostToDevice); #else // Copy the reaction model and S matrix to constant memory on the GPU. 
CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(numberReactionsC, &numberReactions, sizeof(unsigned int))); CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(reactionOrdersC, reactionOrders, numberReactions*sizeof(unsigned int))); CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(reactionSitesC, reactionSites, numberReactions*sizeof(unsigned int))); CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(D1C, D1, numberReactions*sizeof(unsigned int))); CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(D2C, D2, numberReactions*sizeof(unsigned int))); #endif #ifdef MPD_GLOBAL_S_MATRIX // If S matrix stored in global memory, allocate space and perform copy hipMalloc(&SG, numberSpecies*numberReactions * sizeof(int8_t)); hipMemcpy(SG, tmpS, numberSpecies*numberReactions * sizeof(int8_t), hipMemcpyHostToDevice); #else // S matrix is in constant memory CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(SC, tmpS, numberSpecies*numberReactions*sizeof(int8_t))); #endif // Free any temporary resources. delete [] reactionSites; delete [] D1; delete [] D2; delete [] reactionOrders; delete [] tmpS; } void MpdRdmeSolver::buildDiffusionModel(const uint numberSiteTypesA, const double * DFA, const uint * RLA, lattice_size_t latticeXSize, lattice_size_t latticeYSize, lattice_size_t latticeZSize, site_size_t particlesPerSite, const unsigned int bytes_per_particle, si_dist_t latticeSpacing, const uint8_t * latticeData, const uint8_t * latticeSitesData, bool rowMajorData) { RDMESolver::buildDiffusionModel(numberSiteTypesA, DFA, RLA, latticeXSize, latticeYSize, latticeZSize, particlesPerSite, bytes_per_particle, latticeSpacing, latticeData, latticeSitesData, rowMajorData); // Get the time step. tau=atof((*parameters)["timestep"].c_str()); if (tau <= 0.0) throw InvalidArgException("timestep", "A positive timestep must be specified for the solver."); maxSiteCounts.resize(numberSiteTypes); currentMaxSiteCounts.resize(numberSiteTypes); std::fill(maxSiteCounts.begin(), maxSiteCounts.end(), 0); maxParticleCounts.resize(numberSpecies+1); currentMaxParticleCounts.resize(numberSpecies+1); std::fill(maxParticleCounts.begin(), maxParticleCounts.end(), 0); // Setup the cuda transition matrix. const size_t DFmatrixSize = numberSpecies*numberSiteTypes*numberSiteTypes; #ifndef MPD_GLOBAL_T_MATRIX if (DFmatrixSize > MPD_MAX_TRANSITION_TABLE_ENTRIES) throw Exception("The number of transition table entries exceeds the maximum supported by the solver."); #endif #ifndef MPD_GLOBAL_S_MATRIX if (numberReactions*numberSiteTypes > MPD_MAX_RL_MATRIX_ENTRIES) throw Exception("The number of RL matrix entries exceeds the maximum supported by the solver."); #endif // Calculate the probability from the diffusion coefficient and the lattice properties. /* * p0 = probability of staying at the site, q = probability of moving in plus or minus direction * * D=(1-p0)*lambda^2/2*tau * q=(1-p0)/2 * D=2q*lambda^2/2*tau * q=D*tau/lambda^2 */ float * T = new float[DFmatrixSize]; for (uint i=0; i<DFmatrixSize; i++) { float q=(float)(DF[i]*tau/pow(latticeSpacing,2)); if (q > 0.50f) throw InvalidArgException("D", "The specified diffusion coefficient is too high for the diffusion model."); T[i] = q; } // Setup the cuda reaction location matrix. uint8_t * tmpRL = new uint8_t[numberReactions*numberSiteTypes]; for (uint i=0; i<numberReactions*numberSiteTypes; i++) { tmpRL[i] = RL[i]; } // Copy the diffusion model to constant memory on the GPU. 
#ifdef MPD_GLOBAL_T_MATRIX CUDA_EXCEPTION_CHECK(hipMalloc(&TG, DFmatrixSize*sizeof(float))); CUDA_EXCEPTION_CHECK(hipMemcpy(TG, T, DFmatrixSize*sizeof(float), hipMemcpyHostToDevice)); CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(TC, &TG, sizeof(float*))); #else CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(TC, T, DFmatrixSize*sizeof(float))); #endif CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(numberSpeciesC, &numberSpecies, sizeof(numberSpeciesC))); CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(numberSiteTypesC, &numberSiteTypes, sizeof(numberSiteTypesC))); const unsigned int latticeXYSize = latticeXSize*latticeYSize; const unsigned int latticeXYZSize = latticeXSize*latticeYSize*latticeZSize; CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(latticeXSizeC, &latticeXSize, sizeof(latticeYSizeC))); CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(latticeYSizeC, &latticeYSize, sizeof(latticeYSizeC))); CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(latticeZSizeC, &latticeZSize, sizeof(latticeZSizeC))); CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(global_latticeZSizeC, &latticeZSize, sizeof(latticeZSizeC))); CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(latticeXYSizeC, &latticeXYSize, sizeof(latticeXYSizeC))); CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(latticeXYZSizeC, &latticeXYZSize, sizeof(latticeXYZSizeC))); CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(global_latticeXYZSizeC, &latticeXYZSize, sizeof(latticeXYZSizeC))); #ifdef MPD_GLOBAL_S_MATRIX // Store RL in global memory too, since I'm going to assume if S is too big, then RL is too. hipMalloc(&RLG, numberReactions*numberSiteTypes * sizeof(uint8_t)); hipMemcpy(RLG, tmpRL, numberReactions*numberSiteTypes * sizeof(uint8_t), hipMemcpyHostToDevice); #else // RL is stored in constant memory CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(RLC, tmpRL, numberReactions*numberSiteTypes*sizeof(uint8_t))); #endif delete [] tmpRL; delete [] T; // Set the cuda reaction model rates now that we have the subvolume size. 
float * reactionRates = new float[numberReactions]; for (uint i=0; i<numberReactions; i++) { if (reactionTypes[i] == ZerothOrderPropensityArgs::REACTION_TYPE) { reactionRates[i] = ((ZerothOrderPropensityArgs *)propensityFunctionArgs[i])->k*tau/(latticeXSize*latticeYSize*latticeZSize); } else if (reactionTypes[i] == FirstOrderPropensityArgs::REACTION_TYPE) { reactionRates[i] = ((FirstOrderPropensityArgs *)propensityFunctionArgs[i])->k*tau; } else if (reactionTypes[i] == SecondOrderPropensityArgs::REACTION_TYPE) { reactionRates[i] = ((SecondOrderPropensityArgs *)propensityFunctionArgs[i])->k*tau*latticeXSize*latticeYSize*latticeZSize; } else if (reactionTypes[i] == SecondOrderSelfPropensityArgs::REACTION_TYPE) { reactionRates[i] = ((SecondOrderSelfPropensityArgs *)propensityFunctionArgs[i])->k*tau*latticeXSize*latticeYSize*latticeZSize; } else { throw InvalidArgException("reactionTypeA", "the reaction type was not supported by the solver", reactionTypes[i]); } } #ifdef MPD_GLOBAL_R_MATRIX hipMalloc(&reactionRatesG, numberReactions*sizeof(float)); hipMemcpy(reactionRatesG, reactionRates, numberReactions*sizeof(float), hipMemcpyHostToDevice); #else CUDA_EXCEPTION_CHECK(hipMemcpyToSymbol(reactionRatesC, reactionRates, numberReactions*sizeof(float))); #endif delete [] reactionRates; // set up pre-configured propensity matricies size_t zeroOrderSize=numberSiteTypes; size_t firstOrderSize=numberSpecies*numberSiteTypes; size_t secondOrderSize=numberSpecies*numberSpecies*numberSiteTypes; float *zeroOrder=new float[zeroOrderSize]; float *firstOrder=new float[firstOrderSize]; float *secondOrder=new float[secondOrderSize]; float scale=latticeXSize*latticeYSize*latticeZSize; for(uint site=0; site<numberSiteTypes; site++) { uint o1=site*numberSpecies; uint o2=site*numberSpecies*numberSpecies; zeroOrder[site]=0.0f; for (uint i=0; i<numberSpecies; i++) { firstOrder[o1 + i]=0.0f; for (uint j=0; j<numberSpecies; j++) secondOrder[o2 + i*numberSpecies + j]=0.0f; } for (uint i=0; i<numberReactions; i++) { if(! 
RL[i*numberSiteTypes + site]) continue; switch(reactionTypes[i]) { case ZerothOrderPropensityArgs::REACTION_TYPE: { ZerothOrderPropensityArgs *rx=(ZerothOrderPropensityArgs *)propensityFunctionArgs[i]; zeroOrder[site]+=rx->k*tau/scale; } break; case FirstOrderPropensityArgs::REACTION_TYPE: { FirstOrderPropensityArgs *rx=(FirstOrderPropensityArgs *)propensityFunctionArgs[i]; firstOrder[o1 + rx->si]+=rx->k*tau; } break; case SecondOrderPropensityArgs::REACTION_TYPE: { SecondOrderPropensityArgs *rx=(SecondOrderPropensityArgs *)propensityFunctionArgs[i]; secondOrder[o2 + (rx->s1i) * numberSpecies + (rx->s2i)]=rx->k*tau*scale; secondOrder[o2 + (rx->s2i) * numberSpecies + (rx->s1i)]=rx->k*tau*scale; } break; case SecondOrderSelfPropensityArgs::REACTION_TYPE: { SecondOrderSelfPropensityArgs *rx=(SecondOrderSelfPropensityArgs *)propensityFunctionArgs[i]; secondOrder[o2 + (rx->si) * numberSpecies + (rx->si)]+=rx->k*tau*scale*2; } } } } hipMalloc(&propZeroOrder, zeroOrderSize*sizeof(float)); hipMemcpy(propZeroOrder, zeroOrder, zeroOrderSize*sizeof(float), hipMemcpyHostToDevice); hipMalloc(&propFirstOrder, firstOrderSize*sizeof(float)); hipMemcpy(propFirstOrder, firstOrder, firstOrderSize*sizeof(float), hipMemcpyHostToDevice); hipMalloc(&propSecondOrder, secondOrderSize*sizeof(float)); hipMemcpy(propSecondOrder, secondOrder, secondOrderSize*sizeof(float), hipMemcpyHostToDevice); delete[] zeroOrder; delete[] firstOrder; delete[] secondOrder; } void MpdRdmeSolver::generateTrajectory() { // Shadow the lattice member as a cuda lattice. CudaByteLattice * lattice = (CudaByteLattice *)this->lattice; // Get the interval for writing species counts and lattices. double speciesCountsWriteInterval=atof((*parameters)["writeInterval"].c_str()); double nextSpeciesCountsWriteTime = speciesCountsWriteInterval; lm::io::SpeciesCounts speciesCountsDataSet; speciesCountsDataSet.set_number_species(numberSpeciesToTrack); speciesCountsDataSet.set_number_entries(0); double latticeWriteInterval=atof((*parameters)["latticeWriteInterval"].c_str()); double nextLatticeWriteTime = latticeWriteInterval; lm::io::Lattice latticeDataSet; // Get the simulation time limit. double maxTime=atof((*parameters)["maxTime"].c_str()); Print::printf(Print::INFO, "Running mpd rdme simulation with %d species, %d reactions, %d site types for %e s with tau %e. Writing species at %e and lattice at %e intervals", numberSpecies, numberReactions, numberSiteTypes, maxTime, tau, speciesCountsWriteInterval, latticeWriteInterval); // Set the initial time. double time = 0.0; double base_time = 0.0; uint32_t timestep=1; uint32_t complete_steps=0; // Simulation Hook interface bool hookEnabled=false; double nextHookTime=0.0; double hookInterval=0.0; if((*parameters)["hookInterval"] != "") { hookInterval=atof((*parameters)["hookInterval"].c_str()); hookEnabled=hookInterval >= 0.0; nextHookTime=hookInterval; } // Find out at what interval to write status messages double printPerfInterval = 60; if((*parameters)["perfPrintInterval"] != "") { printPerfInterval=atof((*parameters)["perfPrintInterval"].c_str()); } // Call beginning hook. Any modifications to the lattice will be // accounted for because we have not yet copied to device memory // thus we do not need to check return value onBeginTrajectory(lattice); // Synchronize the cuda memory. lattice->copyToGPU(); // Record the initial species counts. recordSpeciesCounts(time, lattice, &speciesCountsDataSet); // Write the initial lattice. 
writeLatticeData(time, lattice, &latticeDataSet); // initialize max counts from initial particle/site lattice initMaxCounts(lattice); Timer timer; timer.tick(); double lastT=0; int lastSteps=0; while (time < maxTime) { if(globalAbort) { printf("Global abort: terminating solver\n"); break; } lastT += timer.tock(); lastSteps += 1; if ( lastT >= printPerfInterval) { double stepTime = lastT/lastSteps; double completionTime = stepTime*(maxTime-time)/tau; std::string units; if (completionTime > 60*60*24*365) { units = "weeks"; completionTime /= 60*60*24*365; } else if (completionTime > 60*60*24*30) { units = "months"; completionTime /= 60*60*24*30; } else if (completionTime > 60*60*24*7) { units = "weeks"; completionTime /= 60*60*24*7; } else if (completionTime > 60*60*24) { units = "days"; completionTime /= 60*60*24; } else if (completionTime > 60*60) { units = "hours"; completionTime /= 60*60; } else if (completionTime > 60) { units = "minutes"; completionTime /= 60; } else { units = "seconds"; } Print::printf(Print::INFO, "Average walltime per timestep: %.2f ms. Progress: %.4fs/%.4fs (% .3g%% done / %.2g %s walltime remaining)", 1000.0*stepTime, time, maxTime, 100.0*time/maxTime, completionTime, units.c_str()); lastT = 0; lastSteps = 0; } timer.tick(); // Run the next timestep. runTimestep(lattice, timestep++); complete_steps++; // Update the time. time = base_time + complete_steps*tau; // Check if we need to execute the hook if (hookEnabled && time >= nextHookTime-EPS) { lattice->copyFromGPU(); switch(hookSimulation(time, lattice)) { case 0: break; case 1: lattice->copyToGPU(); break; case 2: lattice->copyToGPU(); writeLatticeSites(time, lattice); break; default: throw("Unknown hook return value"); } nextHookTime += hookInterval; } // See if we need to write out the any data. if (time >= nextLatticeWriteTime-EPS || time >= nextSpeciesCountsWriteTime-EPS) { // rebase time to match how mgpu does it base_time = base_time + complete_steps*tau; complete_steps=0; // Synchronize the lattice. lattice->copyFromGPU(); // See if we need to write the lattice. if (time >= nextLatticeWriteTime-EPS) { PROF_BEGIN(PROF_SERIALIZE_LATTICE); writeLatticeData(time, lattice, &latticeDataSet); nextLatticeWriteTime += latticeWriteInterval; PROF_END(PROF_SERIALIZE_LATTICE); switch(onWriteLattice(time, lattice)) { case 0: break; case 1: lattice->copyToGPU(); break; case 2: lattice->copyToGPU(); writeLatticeSites(time, lattice); break; default: throw("Unknown hook return value"); } } // See if we need to write the species counts. if (time >= nextSpeciesCountsWriteTime-EPS) { PROF_BEGIN(PROF_DETERMINE_COUNTS); recordSpeciesCounts(time, lattice, &speciesCountsDataSet); nextSpeciesCountsWriteTime += speciesCountsWriteInterval; PROF_END(PROF_DETERMINE_COUNTS); // See if we have accumulated enough species counts to send. if (speciesCountsDataSet.number_entries() >= TUNE_SPECIES_COUNTS_BUFFER_SIZE) { PROF_BEGIN(PROF_SERIALIZE_COUNTS); writeSpeciesCounts(&speciesCountsDataSet); PROF_END(PROF_SERIALIZE_COUNTS); } } } } lattice->copyFromGPU(); onEndTrajectory(lattice); // Write any remaining species counts. writeSpeciesCounts(&speciesCountsDataSet); writeMaxCounts(); } // Do not do anything in this class; assume derivative classes will override int MpdRdmeSolver::hookSimulation(double time, CudaByteLattice *lattice) { // Overload this function in derivative classes // Return 0 if the lattice state is unchanged // Return 1 if the lattice state has been modified, // and it needs to be copied back to the GPU. 
// Return 2 if lattice sites have changed, and should // be copied back to the GPU *and* be recorded // in the output file return 0; } int MpdRdmeSolver::onWriteLattice(double time, CudaByteLattice *lattice) { // Overload this function in derivative classes // Return 0 if the lattice state is unchanged // Return 1 if the lattice state has been modified, // and it needs to be copied back to the GPU. // Return 2 if lattice sites have changed, and should // be copied back to the GPU *and* be recorded // in the output file return 0; } int MpdRdmeSolver::onBeginTrajectory(CudaByteLattice *lattice) { return 0; } int MpdRdmeSolver::onEndTrajectory(CudaByteLattice *lattice) { return 0; } void MpdRdmeSolver::initMaxCounts(CudaByteLattice *lattice) { uint8_t *sites = reinterpret_cast<uint8_t*>(lattice->getSitesMemory()); const size_t ns = lattice->getNumberSites(); std::fill(currentMaxSiteCounts.begin(), currentMaxSiteCounts.end(), 0); for (size_t i=0; i < ns; i++) { if (sites[i] > 0) { assert(sites[i] < currentMaxSiteCounts.size()); currentMaxSiteCounts[sites[i]]++; } } for (int i=0; i < maxSiteCounts.size(); i++) { maxSiteCounts[i] = ::max(maxSiteCounts[i], currentMaxSiteCounts[i]); } uint8_t *particles = reinterpret_cast<uint8_t*>(lattice->getParticlesMemory()); const size_t np = MPD_LATTICE_MAX_OCCUPANCY * lattice->getNumberSites(); std::fill(currentMaxParticleCounts.begin(), currentMaxParticleCounts.end(), 0); for (size_t i=0; i < np; i++) { if (particles[i] > 0) { currentMaxParticleCounts[particles[i]]++; } } for (int i=0; i < maxParticleCounts.size(); i++) { maxParticleCounts[i] = ::max(maxParticleCounts[i], currentMaxParticleCounts[i]); } } void MpdRdmeSolver::writeLatticeData(double time, CudaByteLattice * lattice, lm::io::Lattice * latticeDataSet) { Print::printf(Print::DEBUG, "Writing lattice at %e s", time); // Record the lattice data. latticeDataSet->Clear(); latticeDataSet->set_lattice_x_size(lattice->getSize().x); latticeDataSet->set_lattice_y_size(lattice->getSize().y); latticeDataSet->set_lattice_z_size(lattice->getSize().z); latticeDataSet->set_particles_per_site(lattice->getMaxOccupancy()); latticeDataSet->set_time(time); // update max counts uint8_t *particles = reinterpret_cast<uint8_t*>(lattice->getParticlesMemory()); const size_t np = MPD_LATTICE_MAX_OCCUPANCY * lattice->getNumberSites(); std::fill(currentMaxParticleCounts.begin(), currentMaxParticleCounts.end(), 0); for (size_t i=0; i < np; i++) { if (particles[i] > 0) { currentMaxParticleCounts[particles[i]]++; } } for (int i=0; i < maxParticleCounts.size(); i++) { maxParticleCounts[i] = ::max(maxParticleCounts[i], currentMaxParticleCounts[i]); } // Push it to the output queue. size_t payloadSize = size_t(lattice->getSize().x)*size_t(lattice->getSize().y)*size_t(lattice->getSize().z)*size_t(lattice->getMaxOccupancy())*sizeof(uint8_t); lm::main::DataOutputQueue::getInstance()->pushDataSet(lm::main::DataOutputQueue::BYTE_LATTICE, replicate, latticeDataSet, lattice, payloadSize, &lm::rdme::ByteLattice::nativeSerialize); } void MpdRdmeSolver::writeLatticeSites(double time, CudaByteLattice * lattice) { Print::printf(Print::DEBUG, "Writing lattice sites at %e s", time); lm::io::Lattice latticeDataSet; // Record the lattice data. 
latticeDataSet.Clear(); latticeDataSet.set_lattice_x_size(lattice->getSize().x); latticeDataSet.set_lattice_y_size(lattice->getSize().y); latticeDataSet.set_lattice_z_size(lattice->getSize().z); latticeDataSet.set_particles_per_site(lattice->getMaxOccupancy()); latticeDataSet.set_time(time); // update max counts uint8_t *sites = reinterpret_cast<uint8_t*>(lattice->getSitesMemory()); const size_t ns = lattice->getNumberSites(); std::fill(currentMaxSiteCounts.begin(), currentMaxSiteCounts.end(), 0); for (size_t i=0; i < ns; i++) { if (sites[i] > 0) { assert(sites[i] < currentMaxSiteCounts.size()); currentMaxSiteCounts[sites[i]]++; } } for (int i=0; i < maxSiteCounts.size(); i++) { maxSiteCounts[i] = ::max(maxSiteCounts[i], currentMaxSiteCounts[i]); } // Push it to the output queue. size_t payloadSize = lattice->getSize().x*lattice->getSize().y*lattice->getSize().z*sizeof(uint8_t); lm::main::DataOutputQueue::getInstance()->pushDataSet(lm::main::DataOutputQueue::SITE_LATTICE, replicate, &latticeDataSet, lattice, payloadSize, &lm::rdme::ByteLattice::nativeSerializeSites); } void MpdRdmeSolver::writeMaxCounts() { std::vector<size_t> shape_p(1); shape_p[0] = maxParticleCounts.size(); H5MetaData md_p = make_H5_meta<uint32_t>(H5MetaData::NEW_DATASET, shape_p, "MaxParticleCounts"); lm::main::DataOutputQueue::getInstance()->pushDataSet(md_p, replicate, &(maxParticleCounts[0])); std::vector<size_t> shape_s(1); shape_s[0] = maxSiteCounts.size(); H5MetaData md_s = make_H5_meta<uint32_t>(H5MetaData::NEW_DATASET, shape_s, "MaxSiteCounts"); lm::main::DataOutputQueue::getInstance()->pushDataSet(md_s, replicate, &(maxSiteCounts[0])); } void MpdRdmeSolver::recordSpeciesCounts(double time, CudaByteLattice * lattice, lm::io::SpeciesCounts * speciesCountsDataSet) { std::map<particle_t,uint> particleCounts = lattice->getParticleCounts(); speciesCountsDataSet->set_number_entries(speciesCountsDataSet->number_entries()+1); speciesCountsDataSet->add_time(time); for (particle_t p=0; p<numberSpeciesToTrack; p++) { speciesCountsDataSet->add_species_count((particleCounts.count(p+1)>0)?particleCounts[p+1]:0); } } void MpdRdmeSolver::writeSpeciesCounts(lm::io::SpeciesCounts * speciesCountsDataSet) { if (speciesCountsDataSet->number_entries() > 0) { // Push it to the output queue. lm::main::DataOutputQueue::getInstance()->pushDataSet(lm::main::DataOutputQueue::SPECIES_COUNTS, replicate, speciesCountsDataSet); // Reset the data set. speciesCountsDataSet->Clear(); speciesCountsDataSet->set_number_species(numberSpeciesToTrack); speciesCountsDataSet->set_number_entries(0); } } uint64_t MpdRdmeSolver::getTimestepSeed(uint32_t timestep, uint32_t substep) { uint64_t timestepHash = (((((uint64_t)seed)<<30)+timestep)<<2)+substep; timestepHash = timestepHash * 3202034522624059733ULL + 4354685564936845319ULL; timestepHash ^= timestepHash >> 20; timestepHash ^= timestepHash << 41; timestepHash ^= timestepHash >> 5; timestepHash *= 7664345821815920749ULL; return timestepHash; } void MpdRdmeSolver::runTimestep(CudaByteLattice * lattice, uint32_t timestep) { PROF_BEGIN(PROF_MPD_TIMESTEP); // Calculate some properties of the lattice. lattice_coord_t size = lattice->getSize(); const unsigned int latticeXSize = size.x; const unsigned int latticeYSize = size.y; const unsigned int latticeZSize = size.z; dim3 gridSize, threadBlockSize; // Execute the kernel for the x direction. 
PROF_CUDA_START(cudaStream); PROF_CUDA_BEGIN(PROF_MPD_X_DIFFUSION,cudaStream); #ifdef MPD_CUDA_3D_GRID_LAUNCH calculateXLaunchParameters(&gridSize, &threadBlockSize, TUNE_MPD_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeYSize, latticeZSize); hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::mpd_x_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), getTimestepSeed(timestep,0), (unsigned int*)cudaOverflowList))); #else unsigned int gridXSize; calculateXLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, TUNE_MPD_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeYSize, latticeZSize); hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::mpd_x_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), gridXSize, getTimestepSeed(timestep,0), (unsigned int*)cudaOverflowList))); #endif PROF_CUDA_END(PROF_MPD_X_DIFFUSION,cudaStream); lattice->swapSrcDest(); // Execute the kernel for the y direction. PROF_CUDA_BEGIN(PROF_MPD_Y_DIFFUSION,cudaStream); #ifdef MPD_CUDA_3D_GRID_LAUNCH calculateYLaunchParameters(&gridSize, &threadBlockSize, TUNE_MPD_Y_BLOCK_X_SIZE, TUNE_MPD_Y_BLOCK_Y_SIZE, latticeXSize, latticeYSize, latticeZSize); hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::mpd_y_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), getTimestepSeed(timestep,1), (unsigned int*)cudaOverflowList))); #else calculateYLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, TUNE_MPD_Y_BLOCK_X_SIZE, TUNE_MPD_Y_BLOCK_Y_SIZE, latticeXSize, latticeYSize, latticeZSize); hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::mpd_y_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), gridXSize, getTimestepSeed(timestep,1), (unsigned int*)cudaOverflowList))); #endif PROF_CUDA_END(PROF_MPD_Y_DIFFUSION,cudaStream); lattice->swapSrcDest(); // Execute the kernel for the z direction. 
PROF_CUDA_BEGIN(PROF_MPD_Z_DIFFUSION,cudaStream); #ifdef MPD_CUDA_3D_GRID_LAUNCH calculateZLaunchParameters(&gridSize, &threadBlockSize, TUNE_MPD_Z_BLOCK_X_SIZE, TUNE_MPD_Z_BLOCK_Z_SIZE, latticeXSize, latticeYSize, latticeZSize); hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::mpd_z_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), getTimestepSeed(timestep,2), (unsigned int*)cudaOverflowList))); #else calculateZLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, TUNE_MPD_Z_BLOCK_X_SIZE, TUNE_MPD_Z_BLOCK_Z_SIZE, latticeXSize, latticeYSize, latticeZSize); hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::mpd_z_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), gridXSize, getTimestepSeed(timestep,2), (unsigned int*)cudaOverflowList))); #endif PROF_CUDA_END(PROF_MPD_Z_DIFFUSION,cudaStream); lattice->swapSrcDest(); if (numberReactions > 0) { // Execute the kernel for the reaction, this kernel updates the lattice in-place, so only the src pointer is passed. PROF_CUDA_BEGIN(PROF_MPD_REACTION,cudaStream); #ifdef MPD_CUDA_3D_GRID_LAUNCH calculateReactionLaunchParameters(&gridSize, &threadBlockSize, TUNE_MPD_REACTION_BLOCK_X_SIZE, TUNE_MPD_REACTION_BLOCK_Y_SIZE, latticeXSize, latticeYSize, latticeZSize); #ifdef MPD_GLOBAL_S_MATRIX #ifdef MPD_GLOBAL_R_MATRIX #ifdef MPD_FREAKYFAST hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::precomp_reaction_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemorySrc(), getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList, SG, RLG, reactionOrdersG, reactionSitesG, D1G, D2G, reactionRatesG, propZeroOrder, propFirstOrder, propSecondOrder))); #else hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::reaction_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemorySrc(), getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList, SG, RLG, reactionOrdersG, reactionSitesG, D1G, D2G, reactionRatesG))); #endif #else #ifdef MPD_FREAKYFAST hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::precomp_reaction_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemorySrc(), getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList, SG, RLG, propZeroOrder, propFirstOrder, propSecondOrder))); #else hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::reaction_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemorySrc(), getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList, SG, RLG))); #endif #endif #else hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::reaction_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemorySrc(), getTimestepSeed(timestep,3), (unsigned 
int*)cudaOverflowList))); #endif #else calculateReactionLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, TUNE_MPD_REACTION_BLOCK_X_SIZE, TUNE_MPD_REACTION_BLOCK_Y_SIZE, latticeXSize, latticeYSize, latticeZSize); #ifdef MPD_GLOBAL_S_MATRIX hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::reaction_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemorySrc(), gridXSize, getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList, SG, RLG))); #else hipLaunchKernelGGL(( CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::reaction_kernel), dim3(gridSize),dim3(threadBlockSize),0,cudaStream, (unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemorySrc(), gridXSize, getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList))); #endif #endif PROF_CUDA_END(PROF_MPD_REACTION,cudaStream); } if(overflow_handling == OVERFLOW_MODE_RELAXED) hipLaunchKernelGGL(( mpdrdme_dev::correct_overflows), dim3(dim3(1,1,1)), dim3(dim3(TUNE_MPD_MAX_PARTICLE_OVERFLOWS,1,1)),0,cudaStream, (unsigned int*)lattice->getGPUMemorySrc(), (unsigned int*)cudaOverflowList); // Wait for the kernels to complete. PROF_BEGIN(PROF_MPD_SYNCHRONIZE); CUDA_EXCEPTION_CHECK(hipStreamSynchronize(cudaStream)); PROF_END(PROF_MPD_SYNCHRONIZE); if(overflow_handling == OVERFLOW_MODE_RELAXED) { uint unresolved = ((unsigned int*)cudaOverflowList)[0]; if (unresolved > 0) { Print::printf(Print::WARNING, "%d unresolved overflows", unresolved); } } if(overflow_handling == OVERFLOW_MODE_CLASSIC) { // Handle any particle overflows. PROF_BEGIN(PROF_MPD_OVERFLOW); overflowTimesteps++; #ifndef MPD_MAPPED_OVERFLOWS uint32_t overflowList[1+2*TUNE_MPD_MAX_PARTICLE_OVERFLOWS]; CUDA_EXCEPTION_CHECK(hipMemcpy(overflowList, cudaOverflowList, MPD_OVERFLOW_LIST_SIZE, hipMemcpyDeviceToHost)); #else uint32_t *overflowList = (uint32_t*)cudaOverflowList; #endif uint numberExceptions = overflowList[0]; if (numberExceptions > 0) { Print::printf(Print::DEBUG, "%d overflows", numberExceptions); // Make sure we did not exceed the overflow buffer. if (numberExceptions > TUNE_MPD_MAX_PARTICLE_OVERFLOWS) throw Exception("Too many particle overflows for the available buffer", numberExceptions); // Synchronize the lattice. lattice->copyFromGPU(); // Go through each exception. for (uint i=0; i<numberExceptions; i++) { // Extract the index and particle type. lattice_size_t latticeIndex = overflowList[(i*2)+1]; particle_t particle = overflowList[(i*2)+2]; // Get the x, y, and z coordiantes. lattice_size_t x = latticeIndex%lattice->getXSize(); lattice_size_t y = (latticeIndex/lattice->getXSize())%lattice->getYSize(); lattice_size_t z = latticeIndex/(lattice->getXSize()*lattice->getYSize()); // Put the particles back into a nearby lattice site. bool replacedParticle = false; for (uint searchRadius=0; !replacedParticle && searchRadius <= TUNE_MPD_MAX_OVERFLOW_REPLACEMENT_DIST; searchRadius++) { // Get the nearby sites. std::vector<lattice_coord_t> sites = lattice->getNearbySites(x,y,z,(searchRadius>0)?searchRadius-1:0,searchRadius); // TODO: Shuffle the sites. // Try to find one that in not fully occupied and of the same type. 
for (std::vector<lattice_coord_t>::iterator it=sites.begin(); it<sites.end(); it++) { lattice_coord_t site = *it; if (lattice->getOccupancy(site.x,site.y,site.z) < lattice->getMaxOccupancy() && lattice->getSiteType(site.x,site.y,site.z) == lattice->getSiteType(x,y,z)) { lattice->addParticle(site.x, site.y, site.z, particle); replacedParticle = true; Print::printf(Print::VERBOSE_DEBUG, "Handled overflow of particle %d at site %d,%d,%d type=%d occ=%d by placing at site %d,%d,%d type=%d newocc=%d dist=%0.2f", particle, x, y, z, lattice->getSiteType(x,y,z), lattice->getOccupancy(x,y,z), site.x, site.y, site.z, lattice->getSiteType(site.x,site.y,site.z), lattice->getOccupancy(site.x,site.y,site.z), sqrt(pow((double)x-(double)site.x,2.0)+pow((double)y-(double)site.y,2.0)+pow((double)z-(double)site.z,2.0))); break; } } } // If we were not able to fix the exception, throw an error. if (!replacedParticle) throw Exception("Unable to find an available site to handle a particle overflow."); } // Copy the changes back to the GPU. lattice->copyToGPU(); // Reset the overflow list. CUDA_EXCEPTION_CHECK(hipMemset(cudaOverflowList, 0, MPD_OVERFLOW_LIST_SIZE)); // Track that we used the overflow list. overflowListUses++; } // If the overflow lsit is being used too often, print a warning. if (overflowTimesteps >= 1000) { if (overflowListUses > 10) Print::printf(Print::WARNING, "%d uses of the particle overflow list in the last 1000 timesteps, performance may be degraded.", overflowListUses); overflowTimesteps = 0; overflowListUses = 0; } PROF_END(PROF_MPD_OVERFLOW); } // End classic overflow PROF_CUDA_FINISH(cudaStream); PROF_END(PROF_MPD_TIMESTEP); } #ifdef MPD_CUDA_3D_GRID_LAUNCH /** * Gets the launch parameters for launching an x diffusion kernel. */ void MpdRdmeSolver::calculateXLaunchParameters(dim3 * gridSize, dim3 * threadBlockSize, const unsigned int maxXBlockSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize) { unsigned int xBlockXSize = min(maxXBlockSize,latticeXSize); unsigned int gridXSize = latticeXSize/xBlockXSize; if (gridXSize*xBlockXSize != latticeXSize) { // Find the largest number of warps that is divisible unsigned int tryx=32; while(tryx < maxXBlockSize) { if (latticeXSize % tryx == 0) xBlockXSize = tryx; tryx +=32; } gridXSize = latticeXSize/xBlockXSize; } (*gridSize).x = gridXSize; (*gridSize).y = latticeYSize; (*gridSize).z = latticeZSize; (*threadBlockSize).x = xBlockXSize; (*threadBlockSize).y = 1; (*threadBlockSize).z = 1; } /** * Gets the launch parameters for launching a y diffusion kernel. */ void MpdRdmeSolver::calculateYLaunchParameters(dim3 * gridSize, dim3 * threadBlockSize, const unsigned int blockXSize, const unsigned int blockYSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize) { (*gridSize).x = latticeXSize/blockXSize; (*gridSize).y = latticeYSize/blockYSize; (*gridSize).z = latticeZSize; (*threadBlockSize).x = blockXSize; (*threadBlockSize).y = blockYSize; (*threadBlockSize).z = 1; } /** * Gets the launch parameters for launching a z diffusion kernel. 
 */
void MpdRdmeSolver::calculateZLaunchParameters(dim3 * gridSize, dim3 * threadBlockSize, const unsigned int blockXSize, const unsigned int blockZSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize)
{
    (*gridSize).x = latticeXSize/blockXSize;
    (*gridSize).y = latticeYSize;
    (*gridSize).z = latticeZSize/blockZSize;
    (*threadBlockSize).x = blockXSize;
    (*threadBlockSize).y = 1;
    (*threadBlockSize).z = blockZSize;
}

/**
 * Gets the launch parameters for launching a reaction kernel.
 */
void MpdRdmeSolver::calculateReactionLaunchParameters(dim3 * gridSize, dim3 * threadBlockSize, const unsigned int blockXSize, const unsigned int blockYSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize)
{
    (*gridSize).x = latticeXSize/blockXSize;
    (*gridSize).y = latticeYSize/blockYSize;
    (*gridSize).z = latticeZSize;
    (*threadBlockSize).x = blockXSize;
    (*threadBlockSize).y = blockYSize;
    (*threadBlockSize).z = 1;
}
#else
/**
 * Gets the launch parameters for launching an x diffusion kernel.
 */
void MpdRdmeSolver::calculateXLaunchParameters(unsigned int * gridXSize, dim3 * gridSize, dim3 * threadBlockSize, const unsigned int maxXBlockSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize)
{
    unsigned int xBlockXSize = min(maxXBlockSize,latticeXSize);
    *gridXSize = latticeXSize/xBlockXSize;
    if ((*gridXSize)*xBlockXSize != latticeXSize)
    {
        // Find the largest number of warps that divides the lattice X size evenly.
        unsigned int tryx=32;
        while(tryx < maxXBlockSize)
        {
            if (latticeXSize % tryx == 0)
                xBlockXSize = tryx;
            tryx +=32;
        }
        *gridXSize = latticeXSize/xBlockXSize;
    }
    (*gridSize).x = (*gridXSize)*latticeYSize;
    (*gridSize).y = latticeZSize;
    (*gridSize).z = 1;
    (*threadBlockSize).x = xBlockXSize;
    (*threadBlockSize).y = 1;
    (*threadBlockSize).z = 1;
}

/**
 * Gets the launch parameters for launching a y diffusion kernel.
 */
void MpdRdmeSolver::calculateYLaunchParameters(unsigned int * gridXSize, dim3 * gridSize, dim3 * threadBlockSize, const unsigned int blockXSize, const unsigned int blockYSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize)
{
    *gridXSize = latticeXSize/blockXSize;
    (*gridSize).x = (*gridXSize)*(latticeYSize/blockYSize);
    (*gridSize).y = latticeZSize;
    (*gridSize).z = 1;
    (*threadBlockSize).x = blockXSize;
    (*threadBlockSize).y = blockYSize;
    (*threadBlockSize).z = 1;
}

/**
 * Gets the launch parameters for launching a z diffusion kernel.
 */
void MpdRdmeSolver::calculateZLaunchParameters(unsigned int * gridXSize, dim3 * gridSize, dim3 * threadBlockSize, const unsigned int blockXSize, const unsigned int blockZSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize)
{
    *gridXSize = latticeXSize/blockXSize;
    (*gridSize).x = (*gridXSize)*(latticeYSize);
    (*gridSize).y = latticeZSize/blockZSize;
    (*gridSize).z = 1;
    (*threadBlockSize).x = blockXSize;
    (*threadBlockSize).y = 1;
    (*threadBlockSize).z = blockZSize;
}

/**
 * Gets the launch parameters for launching a reaction kernel.
*/ void MpdRdmeSolver::calculateReactionLaunchParameters(unsigned int * gridXSize, dim3 * gridSize, dim3 * threadBlockSize, const unsigned int blockXSize, const unsigned int blockYSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize) { *gridXSize = latticeXSize/blockXSize; (*gridSize).x = (*gridXSize)*(latticeYSize/blockYSize); (*gridSize).y = latticeZSize; (*gridSize).z = 1; (*threadBlockSize).x = blockXSize; (*threadBlockSize).y = blockYSize; (*threadBlockSize).z = 1; } #endif namespace mpdrdme_dev { /** * Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice * site, storing the new lattice into shared memory, and then updating the global lattice. */ #ifdef MPD_CUDA_3D_GRID_LAUNCH __global__ void __launch_bounds__(TUNE_MPD_X_BLOCK_MAX_X_SIZE,1) mpd_x_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList) { unsigned int bx=blockIdx.x, by=blockIdx.y, bz=blockIdx.z; #else __global__ void __launch_bounds__(TUNE_MPD_X_BLOCK_MAX_X_SIZE,1) mpd_x_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned int gridXSize, const unsigned long long timestepHash, unsigned int* siteOverflowList) { __shared__ unsigned int bx, by, bz; calculateBlockPosition(&bx, &by, &bz, gridXSize); #endif // Figure out the offset of this thread in the lattice and the lattice segment. unsigned int latticeXIndex = (bx*blockDim.x) + threadIdx.x; unsigned int latticeIndex = (bz*latticeXYSizeC) + (by*latticeXSizeC) + latticeXIndex; unsigned int windowIndex = threadIdx.x+MPD_APRON_SIZE; /////////////////////////////////////////// // Load the lattice into shared memory. // /////////////////////////////////////////// // Shared memory to store the lattice segment. __shared__ unsigned int window[MPD_X_WINDOW_SIZE*MPD_WORDS_PER_SITE]; __shared__ uint8_t sitesWindow[MPD_X_WINDOW_SIZE]; // Copy the x window from device memory into shared memory. copyXWindowFromLattice(bx, inLattice, window, latticeIndex, latticeXIndex, windowIndex); copyXWindowFromSites(bx, inSites, sitesWindow, latticeIndex, latticeXIndex, windowIndex); __syncthreads(); //////////////////////////////////////// // Make the choice for each particle. // //////////////////////////////////////// __shared__ unsigned int choices[MPD_X_WINDOW_SIZE*MPD_WORDS_PER_SITE]; // Make the choices. makeXDiffusionChoices(window, sitesWindow, choices, latticeIndex, latticeXIndex, windowIndex, blockDim.x, timestepHash); __syncthreads(); ////////////////////////////////////////////////////////// // Create version of the lattice at the next time step. // ////////////////////////////////////////////////////////// // Propagate the choices to the new lattice segment. performPropagation(outLattice, window, choices, latticeIndex, windowIndex-1, windowIndex, windowIndex+1, MPD_X_WINDOW_SIZE, siteOverflowList); } /** * Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice * site, storing the new lattice into shared memory, and then updating the global lattice. 
*/ #ifdef MPD_CUDA_3D_GRID_LAUNCH __global__ void __launch_bounds__(TUNE_MPD_Y_BLOCK_X_SIZE*TUNE_MPD_Y_BLOCK_Y_SIZE,1) mpd_y_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList) { unsigned int bx=blockIdx.x, by=blockIdx.y, bz=blockIdx.z; #else __global__ void __launch_bounds__(TUNE_MPD_Y_BLOCK_X_SIZE*TUNE_MPD_Y_BLOCK_Y_SIZE,1) mpd_y_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned int gridXSize, const unsigned long long timestepHash, unsigned int* siteOverflowList) { __shared__ unsigned int bx, by, bz; calculateBlockPosition(&bx, &by, &bz, gridXSize); #endif // Figure out the offset of this thread in the lattice and the lattice segment. unsigned int latticeYIndex = (by*blockDim.y) + threadIdx.y; unsigned int latticeIndex = (bz*latticeXYSizeC) + (latticeYIndex*latticeXSizeC) + (bx*blockDim.x) + threadIdx.x; unsigned int windowYIndex = threadIdx.y+MPD_APRON_SIZE; unsigned int windowIndex = (windowYIndex*blockDim.x) + threadIdx.x; /////////////////////////////////////////// // Load the lattice into shared memory. // /////////////////////////////////////////// // Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle. __shared__ unsigned int window[MPD_Y_WINDOW_SIZE*MPD_WORDS_PER_SITE]; __shared__ uint8_t sitesWindow[MPD_Y_WINDOW_SIZE]; // Copy the x window from device memory into shared memory. copyYWindowFromLattice(inLattice, window, latticeIndex, latticeYIndex, windowIndex, windowYIndex); copyYWindowFromSites(inSites, sitesWindow, latticeIndex, latticeYIndex, windowIndex, windowYIndex); __syncthreads(); //////////////////////////////////////// // Make the choice for each particle. // //////////////////////////////////////// __shared__ unsigned int choices[MPD_Y_WINDOW_SIZE*MPD_WORDS_PER_SITE]; // Make the choices. makeYDiffusionChoices(window, sitesWindow, choices, latticeIndex, latticeYIndex, windowIndex, windowYIndex, timestepHash); __syncthreads(); ////////////////////////////////////////////////////////// // Create version of the lattice at the next time step. // ////////////////////////////////////////////////////////// // Progate the choices to the new lattice segment. performPropagation(outLattice, window, choices, latticeIndex, windowIndex-TUNE_MPD_Y_BLOCK_X_SIZE, windowIndex, windowIndex+TUNE_MPD_Y_BLOCK_X_SIZE, MPD_Y_WINDOW_SIZE, siteOverflowList); } /** * Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice * site, storing the new lattice into shared memory, and then updating the global lattice. */ #ifdef MPD_CUDA_3D_GRID_LAUNCH __global__ void __launch_bounds__(TUNE_MPD_Z_BLOCK_X_SIZE*TUNE_MPD_Z_BLOCK_Z_SIZE,1) mpd_z_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList) { unsigned int bx=blockIdx.x, by=blockIdx.y, bz=blockIdx.z; #else __global__ void __launch_bounds__(TUNE_MPD_Z_BLOCK_X_SIZE*TUNE_MPD_Z_BLOCK_Z_SIZE,1) mpd_z_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned int gridXSize, const unsigned long long timestepHash, unsigned int* siteOverflowList) { __shared__ unsigned int bx, by, bz; calculateBlockPosition(&bx, &by, &bz, gridXSize); #endif // Figure out the offset of this thread in the lattice and the lattice segment. 
unsigned int latticeZIndex = (bz*blockDim.z) + threadIdx.z; unsigned int latticeIndex = (latticeZIndex*latticeXYSizeC) + (by*latticeXSizeC) + (bx*blockDim.x) + threadIdx.x; unsigned int windowZIndex = threadIdx.z+MPD_APRON_SIZE; unsigned int windowIndex = (windowZIndex*blockDim.x) + threadIdx.x; /////////////////////////////////////////// // Load the lattice into shared memory. // /////////////////////////////////////////// // Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle. __shared__ unsigned int window[MPD_Z_WINDOW_SIZE*MPD_WORDS_PER_SITE]; __shared__ uint8_t sitesWindow[MPD_Z_WINDOW_SIZE]; // Copy the x window from device memory into shared memory. copyZWindowFromLattice(inLattice, window, latticeIndex, latticeZIndex, windowIndex, windowZIndex); copyZWindowFromSites(inSites, sitesWindow, latticeIndex, latticeZIndex, windowIndex, windowZIndex); __syncthreads(); //////////////////////////////////////// // Make the choice for each particle. // //////////////////////////////////////// __shared__ unsigned int choices[MPD_Z_WINDOW_SIZE*MPD_WORDS_PER_SITE]; // Make the choices. makeZDiffusionChoices(window, sitesWindow, choices, latticeIndex, latticeZIndex, windowIndex, windowZIndex, timestepHash); __syncthreads(); ////////////////////////////////////////////////////////// // Create version of the lattice at the next time step. // ////////////////////////////////////////////////////////// // Progate the choices to the new lattice segment. performPropagation(outLattice, window, choices, latticeIndex, windowIndex-TUNE_MPD_Z_BLOCK_X_SIZE, windowIndex, windowIndex+TUNE_MPD_Z_BLOCK_X_SIZE, MPD_Z_WINDOW_SIZE, siteOverflowList); } /** * Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice * site, storing the new lattice into shared memory, and then updating the global lattice. 
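 * Depending on MPD_GLOBAL_S_MATRIX and MPD_GLOBAL_R_MATRIX, the stoichiometry (S),
 * reaction-location (RL), and reaction-rate tables are passed to this kernel as
 * global-memory pointers; otherwise they are read from constant memory.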
*/ #ifdef MPD_CUDA_3D_GRID_LAUNCH #ifdef MPD_GLOBAL_S_MATRIX #ifdef MPD_GLOBAL_R_MATRIX __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList, const __restrict__ int8_t *SG, const __restrict__ uint8_t *RLG, const unsigned int* __restrict__ reactionOrdersG, const unsigned int* __restrict__ reactionSitesG, const unsigned int* __restrict__ D1G, const unsigned int* __restrict__ D2G, const float* __restrict__ reactionRatesG) #else __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList, const __restrict__ int8_t *SG, const __restrict__ uint8_t *RLG) #endif #else __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList) #endif { unsigned int bx=blockIdx.x, by=blockIdx.y, bz=blockIdx.z; #else #ifdef MPD_GLOBAL_S_MATRIX #ifdef MPD_GLOBAL_R_MATRIX __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned int gridXSize, const unsigned long long timestepHash, unsigned int* siteOverflowList, const __restrict__ int8_t *SG, const __restrict__ uint8_t *RLG, const unsigned int* __restrict__ reactionOrdersG, const unsigned int* __restrict__ reactionSitesG, const unsigned int* __restrict__ D1G, const unsigned int* __restrict__ D2G, const float* __restrict__ reactionRatesG) #else __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned int gridXSize, const unsigned long long timestepHash, unsigned int* siteOverflowList, const __restrict__ int8_t *SG, const __restrict__ uint8_t *RLG) #endif #else __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned int gridXSize, const unsigned long long timestepHash, unsigned int* siteOverflowList) #endif { __shared__ unsigned int bx, by, bz; calculateBlockPosition(&bx, &by, &bz, gridXSize); #endif // Figure out the offset of this thread in the lattice and the lattice segment. unsigned int latticeYIndex = (by*blockDim.y) + threadIdx.y; unsigned int latticeIndex = (bz*latticeXYSizeC) + (latticeYIndex*latticeXSizeC) + (bx*blockDim.x) + threadIdx.x; /////////////////////////////////////////// // Load the particles and site. // /////////////////////////////////////////// unsigned int particles[MPD_WORDS_PER_SITE]; for (uint w=0, latticeOffset=0; w<MPD_WORDS_PER_SITE; w++, latticeOffset+=latticeXYZSizeC) particles[w] = inLattice[latticeIndex+latticeOffset]; uint8_t siteType = inSites[latticeIndex]; //////////////////////////////////////// // Perform the reactions. // //////////////////////////////////////// // Calculate the kinetic rate for each reaction at this site. 
float totalReactionPropensity = 0.0f; for (int i=0; i<numberReactionsC; i++) { #ifdef MPD_GLOBAL_S_MATRIX #ifdef MPD_GLOBAL_R_MATRIX totalReactionPropensity += calculateReactionPropensity(siteType, (uint8_t*)particles, i, RLG, reactionOrdersG, reactionSitesG, D1G, D2G, reactionRatesG); #else totalReactionPropensity += calculateReactionPropensity(siteType, (uint8_t*)particles, i, RLG); #endif #else totalReactionPropensity += calculateReactionPropensity(siteType, (uint8_t*)particles, i); #endif } // If propensity is zero, no reaction can occur. if(totalReactionPropensity == 0.0f) return; // See if a reaction occurred at the site. float reactionProbability = calculateReactionProbability(totalReactionPropensity); unsigned int reactionOccurred = checkForReaction(latticeIndex, reactionProbability, timestepHash); // If there was a reaction, process it. if (reactionOccurred) { // Figure out which reaction occurred. #ifdef MPD_GLOBAL_S_MATRIX #ifdef MPD_GLOBAL_R_MATRIX unsigned int reactionIndex = determineReactionIndex(siteType, (uint8_t*)particles, latticeIndex, totalReactionPropensity, timestepHash, RLG, reactionOrdersG, reactionSitesG, D1G, D2G, reactionRatesG); #else unsigned int reactionIndex = determineReactionIndex(siteType, (uint8_t*)particles, latticeIndex, totalReactionPropensity, timestepHash, RLG); #endif #else unsigned int reactionIndex = determineReactionIndex(siteType, (uint8_t*)particles, latticeIndex, totalReactionPropensity, timestepHash); #endif // Construct the new site. #ifdef MPD_GLOBAL_S_MATRIX evaluateReaction(latticeIndex, siteType, (uint8_t*)particles, reactionIndex, siteOverflowList, SG); #else evaluateReaction(latticeIndex, siteType, (uint8_t*)particles, reactionIndex, siteOverflowList); #endif // Copy the new particles back into the lattice. 
for (uint w=0, latticeOffset=0; w<MPD_WORDS_PER_SITE; w++, latticeOffset+=latticeXYZSizeC) outLattice[latticeIndex+latticeOffset] = particles[w]; } } #ifdef MPD_GLOBAL_S_MATRIX #ifdef MPD_GLOBAL_R_MATRIX __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) precomp_reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList, const __restrict__ int8_t *SG, const __restrict__ uint8_t *RLG, const unsigned int* __restrict__ reactionOrdersG, const unsigned int* __restrict__ reactionSitesG, const unsigned int* __restrict__ D1G, const unsigned int* __restrict__ D2G, const float* __restrict__ reactionRatesG, const float* __restrict__ qp0, const float* __restrict__ qp1, const float* __restrict__ qp2) #else __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) precomp_reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList, const __restrict__ int8_t *SG, const __restrict__ uint8_t *RLG, const float* __restrict__ qp0, const float* __restrict__ qp1, const float* __restrict__ qp2) #endif #else __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) precomp_reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList, const float* __restrict__ qp0, const float* __restrict__ qp1, const float* __restrict__ qp2) #endif { unsigned int bx=blockIdx.x, by=blockIdx.y, bz=blockIdx.z; // Figure out the offset of this thread in the lattice and the lattice segment. unsigned int latticeYIndex = (by*blockDim.y) + threadIdx.y; unsigned int latticeIndex = (bz*latticeXYSizeC) + (latticeYIndex*latticeXSizeC) + (bx*blockDim.x) + threadIdx.x; /////////////////////////////////////////// // Load the particles and site. // /////////////////////////////////////////// unsigned int particles[MPD_WORDS_PER_SITE]; for (uint w=0, latticeOffset=0; w<MPD_WORDS_PER_SITE; w++, latticeOffset+=latticeXYZSizeC) particles[w] = inLattice[latticeIndex+latticeOffset]; uint8_t siteType = inSites[latticeIndex]; //////////////////////////////////////// // Perform the reactions. // //////////////////////////////////////// // Calculate the kinetic rate for each reaction at this site. float totalReactionPropensity = read_element(qp0,siteType); //float totalReactionPropensity = qp0[siteType]; for(uint i=0; i<MPD_PARTICLES_PER_SITE; i++) { uint8_t p1=((uint8_t*)particles)[i]; if(p1 > 0) { totalReactionPropensity += read_element(qp1,(siteType * numberSpeciesC + (p1-1))); //totalReactionPropensity += qp1[(siteType * numberSpeciesC + (p1-1))]; for(uint j=i+1; j<MPD_PARTICLES_PER_SITE; j++) { uint8_t p2=((uint8_t*)particles)[j]; if(p2 > 0) { totalReactionPropensity += read_element(qp2,siteType*numberSpeciesC*numberSpeciesC + (p1-1)*numberSpeciesC + (p2-1)); //totalReactionPropensity += qp2[siteType*numberSpeciesC*numberSpeciesC + (p1-1)*numberSpeciesC + (p2-1)]; } } } } // If propensity is zero, no reaction can occur. if(totalReactionPropensity == 0.0f) return; // See if a reaction occurred at the site. 
float reactionProbability = calculateReactionProbability(totalReactionPropensity); unsigned int reactionOccurred = checkForReaction(latticeIndex, reactionProbability, timestepHash); // If there was a reaction, process it. if (reactionOccurred) { // Figure out which reaction occurred. #ifdef MPD_GLOBAL_S_MATRIX #ifdef MPD_GLOBAL_R_MATRIX unsigned int reactionIndex = determineReactionIndex(siteType, (uint8_t*)particles, latticeIndex, totalReactionPropensity, timestepHash, RLG, reactionOrdersG, reactionSitesG, D1G, D2G, reactionRatesG); #else unsigned int reactionIndex = determineReactionIndex(siteType, (uint8_t*)particles, latticeIndex, totalReactionPropensity, timestepHash, RLG); #endif #else unsigned int reactionIndex = determineReactionIndex(siteType, (uint8_t*)particles, latticeIndex, totalReactionPropensity, timestepHash); #endif // Construct the new site. #ifdef MPD_GLOBAL_S_MATRIX evaluateReaction(latticeIndex, siteType, (uint8_t*)particles, reactionIndex, siteOverflowList, SG); #else evaluateReaction(latticeIndex, siteType, (uint8_t*)particles, reactionIndex, siteOverflowList); #endif // Copy the new particles back into the lattice. for (uint w=0, latticeOffset=0; w<MPD_WORDS_PER_SITE; w++, latticeOffset+=latticeXYZSizeC) outLattice[latticeIndex+latticeOffset] = particles[w]; } } __global__ void correct_overflows(unsigned int* lattice, unsigned int* siteOverflowList) { // Remember: #define MPD_OVERFLOW_LIST_SIZE 1+2*TUNE_MPD_MAX_PARTICLE_OVERFLOWS*sizeof(uint32_t) // Requirement: one block with TUNE_MPD_MAX_PARTICLE_OVERFLOWS threads const unsigned int i = threadIdx.x; const unsigned int total = siteOverflowList[0]; __shared__ unsigned int indexes[TUNE_MPD_MAX_PARTICLE_OVERFLOWS]; __shared__ unsigned int maxround; // Do I have an overflow to look at? if(i >= total) return; // Abort if overflows have overflown if(threadIdx.x == 0) assert(total < TUNE_MPD_MAX_PARTICLE_OVERFLOWS); // load our index lattice_size_t latticeIndex = siteOverflowList[(i*2)+1]; particle_t particle = siteOverflowList[(i*2)+2]; indexes[i] = latticeIndex; // zero out list maxround=0; __syncthreads(); siteOverflowList[0]=0; // Discover which round I should go in. To prevent situations where two threads // will try and add a particle to the same site at the same time, and thus // negating the others, each thread determines what round it is allowed to make // edits in, by determining how many previous overflows are occuring at the // same index. int round=0; for(int j=0; j < i; j++) { if(indexes[j] == latticeIndex) round++; } atomicMax(&maxround, round); __syncthreads(); for(int r=0; r <= maxround; r++) { if(round == r) { unsigned int particles[MPD_WORDS_PER_SITE]; for (uint w=0, latticeOffset=0; w<MPD_WORDS_PER_SITE; w++, latticeOffset+=latticeXYZSizeC) particles[w] = lattice[latticeIndex+latticeOffset]; uint8_t *p=(uint8_t*)particles; int ok=0; for(int pi=0; pi<MPD_PARTICLES_PER_SITE; pi++) { if(p[pi] == 0) { p[pi]=particle; ok=1; //printf("(round %d) Corrected overflow of particle %d at index %d\n", r, particle, latticeIndex); break; } } if(ok) { // Copy the new particles back into the lattice. 
for (uint w=0, latticeOffset=0; w<MPD_WORDS_PER_SITE; w++, latticeOffset+=latticeXYZSizeC) lattice[latticeIndex+latticeOffset] = particles[w]; } else { int exceptionIndex = atomicAdd(siteOverflowList, 1); siteOverflowList[(exceptionIndex*2)+1]=latticeIndex; siteOverflowList[(exceptionIndex*2)+2]=particle; //printf("(round %d) Failed to correct overflow of particle %d at index %d\n", r, particle, latticeIndex); } } __syncthreads(); } //if(i == 0) //printf("in: %d overflows, out: %d\n", total, siteOverflowList[0]); } // Sanity test kernel to compare kernel outputs __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) sanity_check(const unsigned int* L1, const unsigned int* L2) { // Figure out the offset of this thread in the lattice and the lattice segment. unsigned int bx=blockIdx.x, by=blockIdx.y, bz=blockIdx.z; unsigned int latticeYIndex = (by*blockDim.y) + threadIdx.y; unsigned int latticeIndex = (bz*latticeXYSizeC) + (latticeYIndex*latticeXSizeC) + (bx*blockDim.x) + threadIdx.x; /////////////////////////////////////////// // Load the particles and site. // /////////////////////////////////////////// unsigned int particles1; unsigned int particles2; for (uint w=0, latticeOffset=0; w<MPD_WORDS_PER_SITE; w++, latticeOffset+=latticeXYZSizeC) { particles1 = L1[latticeIndex+latticeOffset]; particles2 = L2[latticeIndex+latticeOffset]; if(particles1 != particles2) { printf("**\n ********* Sanity failure block(%d,%d,%d) thread(%d,%d,%d) w=%d L1=%8X L2=%8X\n**\n", bx,by,bz, threadIdx.x, threadIdx.y, threadIdx.z, w, particles1, particles2); } } } } } }
1ab820dee798fcdd907fb05943a2912bda0c9d14.cu
/* * University of Illinois Open Source License * Copyright 2008-2018 Luthey-Schulten Group, * Copyright 2012 Roberts Group, * All rights reserved. * * Developed by: Luthey-Schulten Group * University of Illinois at Urbana-Champaign * http://www.scs.uiuc.edu/~schulten * * Overflow algorithm in RDME solvers and CPU assignment (2012) * Developed by: Roberts Group * Johns Hopkins University * http://biophysics.jhu.edu/roberts/ * * Permission is hereby granted, free of charge, to any person obtaining a copy of * this software and associated documentation files (the Software), to deal with * the Software without restriction, including without limitation the rights to * use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies * of the Software, and to permit persons to whom the Software is furnished to * do so, subject to the following conditions: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimers. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimers in the documentation * and/or other materials provided with the distribution. * * - Neither the names of the Luthey-Schulten Group, University of Illinois at * Urbana-Champaign, the Roberts Group, Johns Hopkins University, nor the names * of its contributors may be used to endorse or promote products derived from * this Software without specific prior written permission. * * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL * THE CONTRIBUTORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR * OTHER DEALINGS WITH THE SOFTWARE. 
* * Author(s): Elijah Roberts, Zane Thornburg */ #include <map> #include <string> #include <cstdlib> #include "config.h" #if defined(MACOSX) #include <mach/mach_time.h> #elif defined(LINUX) #include <time.h> #endif #include "cuda/lm_cuda.h" #include "cuda/ldg.h" #include "core/Math.h" #include "core/Print.h" #include "cme/CMESolver.h" #include "DiffusionModel.pb.h" #include "Lattice.pb.h" #include "SpeciesCounts.pb.h" #include "core/DataOutputQueue.h" #include "core/ResourceAllocator.h" #include "rdme/ByteLattice.h" #include "rdme/CudaByteLattice.h" #include "rdme/MpdRdmeSolver.h" #include "rng/RandomGenerator.h" #include "lptf/Profile.h" #include <sstream> #include "core/Timer.h" #include "io/ArbitraryH5.h" #define MPD_WORDS_PER_SITE (MPD_LATTICE_MAX_OCCUPANCY / 4)// 4 #define MPD_APRON_SIZE 1 #include "cuda/constant.cuh" namespace lm { namespace rdme { namespace mpdrdme_dev { #include "rdme/dev/xor_random_dev.cu" #include "rdme/dev/lattice_sim_1d_dev.cu" #include "rdme/dev/byte_diffusion_1d_dev.cu" #include "rdme/dev/byte_reaction_dev.cu" }}} extern bool globalAbort; using std::map; using lm::io::DiffusionModel; using lm::rdme::Lattice; using lm::rng::RandomGenerator; namespace lm { namespace rdme { MpdRdmeSolver::MpdRdmeSolver() :RDMESolver(lm::rng::RandomGenerator::NONE),seed(0),cudaOverflowList(NULL),cudaStream(0),tau(0.0),overflowTimesteps(0),overflowListUses(0) { } void MpdRdmeSolver::initialize(unsigned int replicate, map<string,string> * parameters, ResourceAllocator::ComputeResources * resources) { RDMESolver::initialize(replicate, parameters, resources); // Figure out the random seed. uint32_t seedTop=(unsigned int)atoi((*parameters)["seed"].c_str()); if (seedTop == 0) { #if defined(MACOSX) seedTop = (uint32_t)mach_absolute_time(); #elif defined(LINUX) struct timespec seed_timespec; if (clock_gettime(CLOCK_REALTIME, &seed_timespec) != 0) throw lm::Exception("Error getting time to use for random seed."); seedTop = seed_timespec.tv_nsec; #endif } seed = (seedTop<<16)|(replicate&0x0000FFFF); Print::printf(Print::INFO, "MPDRDME: Rng seed: top word %u, bottom word %u", (seed>>16)*0xffff, seed&0xFFFF); string &s = (*parameters)["rdme.mpd.overflowhandler"]; if(s == "classic") { overflow_handling = OVERFLOW_MODE_CLASSIC; Print::printf(Print::DEBUG, "Overflow handler set to classic"); } else if(s == "relaxed") { overflow_handling = OVERFLOW_MODE_RELAXED; Print::printf(Print::DEBUG, "Overflow handler set to relaxed"); } else if(s == "") { overflow_handling = OVERFLOW_MODE_CLASSIC; Print::printf(Print::DEBUG, "Overflow handler set to default (classic)"); } else { overflow_handling = OVERFLOW_MODE_CLASSIC; Print::printf(Print::WARNING, "Unknown overflow handler requested: '%s'", s.c_str()); } #ifdef MPD_MAPPED_OVERFLOWS CUDA_EXCEPTION_CHECK(cudaHostAlloc(&cudaOverflowList, MPD_OVERFLOW_LIST_SIZE, cudaHostAllocPortable|cudaHostAllocMapped)); memset(cudaOverflowList, 0, MPD_OVERFLOW_LIST_SIZE); #else // Allocate memory on the device for the exception list. CUDA_EXCEPTION_CHECK(cudaMalloc(&cudaOverflowList, MPD_OVERFLOW_LIST_SIZE)); //TODO: track memory usage. CUDA_EXCEPTION_CHECK(cudaMemset(cudaOverflowList, 0, MPD_OVERFLOW_LIST_SIZE)); #endif // Create a stream for synchronizing the events. CUDA_EXCEPTION_CHECK(cudaStreamCreate(&cudaStream)); } MpdRdmeSolver::~MpdRdmeSolver() { // Free any device memory. if (cudaOverflowList != NULL) { #ifdef MPD_MAPPED_OVERFLOWS CUDA_EXCEPTION_CHECK_NOTHROW(cudaFreeHost(cudaOverflowList)); //TODO: track memory usage. 
#else CUDA_EXCEPTION_CHECK_NOTHROW(cudaFree(cudaOverflowList)); //TODO: track memory usage. #endif cudaOverflowList = NULL; } // If we have created a stream, destroy it. if (cudaStream != 0) { CUDA_EXCEPTION_CHECK_NOTHROW(cudaStreamDestroy(cudaStream)); cudaStream = NULL; } } void MpdRdmeSolver::allocateLattice(lattice_size_t latticeXSize, lattice_size_t latticeYSize, lattice_size_t latticeZSize, site_size_t particlesPerSite, const unsigned int bytes_per_particle, si_dist_t latticeSpacing) { assert(bytes_per_particle == 1); if(particlesPerSite != MPD_LATTICE_MAX_OCCUPANCY) { Print::printf(Print::ERROR, "requested allocation for %d particles per site is not %d", particlesPerSite, MPD_LATTICE_MAX_OCCUPANCY); throw Exception("incorrect particle density"); } if(latticeXSize % 32 != 0) { Print::printf(Print::ERROR, "The X dimension (%d) is not divisible by %d", latticeXSize, 32); throw Exception("incorrect lattice dimensions"); } if(latticeYSize % TUNE_MPD_Y_BLOCK_Y_SIZE != 0) { Print::printf(Print::ERROR, "The Y dimension (%d) is not divisible by %d", latticeYSize, TUNE_MPD_Y_BLOCK_Y_SIZE); throw Exception("incorrect lattice dimensions"); } if(latticeZSize % TUNE_MPD_Z_BLOCK_Z_SIZE != 0) { Print::printf(Print::ERROR, "The Z dimension (%d) is not divisible by %d", latticeZSize, TUNE_MPD_Z_BLOCK_Z_SIZE); throw Exception("incorrect lattice dimensions"); } try { CudaByteLattice *cbl = new CudaByteLattice(latticeXSize, latticeYSize, latticeZSize, latticeSpacing, particlesPerSite); lattice = (Lattice *)cbl; } catch (CUDAException e) { Print::printf(Print::ERROR, e.what()); throw Exception("Failed to create cuda lattice"); } } void MpdRdmeSolver::buildModel(const uint numberSpeciesA, const uint numberReactionsA, const uint * initialSpeciesCountsA, const uint * reactionTypesA, const double * KA, const int * SA, const uint * DA, const uint kCols) { CMESolver::buildModel(numberSpeciesA, numberReactionsA, initialSpeciesCountsA, reactionTypesA, KA, SA, DA, kCols); // Get the time step. tau=atof((*parameters)["timestep"].c_str()); if (tau <= 0.0) throw InvalidArgException("timestep", "A positive timestep must be specified for the solver."); // Make sure we can support the reaction model. if (numberReactions > MPD_MAX_REACTION_TABLE_ENTRIES) { std::stringstream s; s << "The number of reaction table entries (" << numberReactions <<") exceeds the maximum supported by the solver (" << MPD_MAX_REACTION_TABLE_ENTRIES << ")."; throw Exception(s.str().c_str()); } #ifndef MPD_GLOBAL_S_MATRIX if (numberSpecies*numberReactions > MPD_MAX_S_MATRIX_ENTRIES) { std::stringstream s; s << "The number of S matrix entries (" << numberSpecies*numberReactions <<") exceeds the maximum supported by the solver (" << MPD_MAX_S_MATRIX_ENTRIES << ")."; throw Exception(s.str().c_str()); } #endif // Setup the cuda reaction model. 
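// The host builds parallel per-reaction tables for the GPU kernels: reactionOrders holds the
// MPD_*_ORDER_REACTION code and D1/D2 hold the 1-based indices of the reactant species
// (0 means the slot is unused); reactionSites is filled with zeros here.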
unsigned int * reactionOrders = new unsigned int[numberReactions]; unsigned int * reactionSites = new unsigned int[numberReactions]; unsigned int * D1 = new unsigned int[numberReactions]; unsigned int * D2 = new unsigned int[numberReactions]; for (uint i=0; i<numberReactions; i++) { if(reactionTypes[i] == ZerothOrderPropensityArgs::REACTION_TYPE) { reactionOrders[i] = MPD_ZERO_ORDER_REACTION; reactionSites[i] = 0; D1[i] = 0; D2[i] = 0; } else if (reactionTypes[i] == FirstOrderPropensityArgs::REACTION_TYPE) { reactionOrders[i] = MPD_FIRST_ORDER_REACTION; reactionSites[i] = 0; D1[i] = ((FirstOrderPropensityArgs *)propensityFunctionArgs[i])->si+1; D2[i] = 0; } else if (reactionTypes[i] == SecondOrderPropensityArgs::REACTION_TYPE) { reactionOrders[i] = MPD_SECOND_ORDER_REACTION; reactionSites[i] = 0; D1[i] = ((SecondOrderPropensityArgs *)propensityFunctionArgs[i])->s1i+1; D2[i] = ((SecondOrderPropensityArgs *)propensityFunctionArgs[i])->s2i+1; } else if (reactionTypes[i] == SecondOrderSelfPropensityArgs::REACTION_TYPE) { reactionOrders[i] = MPD_SECOND_ORDER_SELF_REACTION; reactionSites[i] = 0; D1[i] = ((SecondOrderSelfPropensityArgs *)propensityFunctionArgs[i])->si+1; D2[i] = 0; } else { throw InvalidArgException("reactionTypeA", "the reaction type was not supported by the solver", reactionTypes[i]); } } // Setup the cuda S matrix. // Transpose S on the device because we will want to read along the numSpecies axis // Before A1A2A3A4A5 B1B2B3B4B5 C1C2C3C4C5 ... // After A1B1C1 A2B2C2 ... /* int8_t * tmpS = new int8_t[numberSpecies*numberReactions]; for (uint i=0; i<numberSpecies*numberReactions; i++) { tmpS[i] = S[i]; } */ int8_t * tmpS = new int8_t[numberSpecies*numberReactions]; for(uint rx = 0; rx < numberReactions; rx++) { for (uint p=0; p<numberSpecies; p++) { //tmpS[numberSpecies*rx + p] = S[numberSpecies*rx + p]; tmpS[rx * numberSpecies + p] = S[numberReactions*p + rx]; } } #ifdef MPD_GLOBAL_R_MATRIX // R matrix put in global memory //cudaMalloc(&numberReactionsG, sizeof(unsigned int)); //cudaMemcpy(numberReactionsG, &numberReactions, sizeof(unsigned int), cudaMemcpyHostToDevice); CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(numberReactionsC, &numberReactions, sizeof(unsigned int))); cudaMalloc(&reactionOrdersG, numberReactions*sizeof(unsigned int)); cudaMemcpy(reactionOrdersG, reactionOrders, numberReactions*sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMalloc(&reactionSitesG, numberReactions*sizeof(unsigned int)); cudaMemcpy(reactionSitesG, reactionSites, numberReactions*sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMalloc(&D1G, numberReactions*sizeof(unsigned int)); cudaMemcpy(D1G, D1, numberReactions*sizeof(unsigned int), cudaMemcpyHostToDevice); cudaMalloc(&D2G, numberReactions*sizeof(unsigned int)); cudaMemcpy(D2G, D2, numberReactions*sizeof(unsigned int), cudaMemcpyHostToDevice); #else // Copy the reaction model and S matrix to constant memory on the GPU. 
CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(numberReactionsC, &numberReactions, sizeof(unsigned int))); CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(reactionOrdersC, reactionOrders, numberReactions*sizeof(unsigned int))); CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(reactionSitesC, reactionSites, numberReactions*sizeof(unsigned int))); CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(D1C, D1, numberReactions*sizeof(unsigned int))); CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(D2C, D2, numberReactions*sizeof(unsigned int))); #endif #ifdef MPD_GLOBAL_S_MATRIX // If S matrix stored in global memory, allocate space and perform copy cudaMalloc(&SG, numberSpecies*numberReactions * sizeof(int8_t)); cudaMemcpy(SG, tmpS, numberSpecies*numberReactions * sizeof(int8_t), cudaMemcpyHostToDevice); #else // S matrix is in constant memory CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(SC, tmpS, numberSpecies*numberReactions*sizeof(int8_t))); #endif // Free any temporary resources. delete [] reactionSites; delete [] D1; delete [] D2; delete [] reactionOrders; delete [] tmpS; } void MpdRdmeSolver::buildDiffusionModel(const uint numberSiteTypesA, const double * DFA, const uint * RLA, lattice_size_t latticeXSize, lattice_size_t latticeYSize, lattice_size_t latticeZSize, site_size_t particlesPerSite, const unsigned int bytes_per_particle, si_dist_t latticeSpacing, const uint8_t * latticeData, const uint8_t * latticeSitesData, bool rowMajorData) { RDMESolver::buildDiffusionModel(numberSiteTypesA, DFA, RLA, latticeXSize, latticeYSize, latticeZSize, particlesPerSite, bytes_per_particle, latticeSpacing, latticeData, latticeSitesData, rowMajorData); // Get the time step. tau=atof((*parameters)["timestep"].c_str()); if (tau <= 0.0) throw InvalidArgException("timestep", "A positive timestep must be specified for the solver."); maxSiteCounts.resize(numberSiteTypes); currentMaxSiteCounts.resize(numberSiteTypes); std::fill(maxSiteCounts.begin(), maxSiteCounts.end(), 0); maxParticleCounts.resize(numberSpecies+1); currentMaxParticleCounts.resize(numberSpecies+1); std::fill(maxParticleCounts.begin(), maxParticleCounts.end(), 0); // Setup the cuda transition matrix. const size_t DFmatrixSize = numberSpecies*numberSiteTypes*numberSiteTypes; #ifndef MPD_GLOBAL_T_MATRIX if (DFmatrixSize > MPD_MAX_TRANSITION_TABLE_ENTRIES) throw Exception("The number of transition table entries exceeds the maximum supported by the solver."); #endif #ifndef MPD_GLOBAL_S_MATRIX if (numberReactions*numberSiteTypes > MPD_MAX_RL_MATRIX_ENTRIES) throw Exception("The number of RL matrix entries exceeds the maximum supported by the solver."); #endif // Calculate the probability from the diffusion coefficient and the lattice properties. /* * p0 = probability of staying at the site, q = probability of moving in plus or minus direction * * D=(1-p0)*lambda^2/2*tau * q=(1-p0)/2 * D=2q*lambda^2/2*tau * q=D*tau/lambda^2 */ float * T = new float[DFmatrixSize]; for (uint i=0; i<DFmatrixSize; i++) { float q=(float)(DF[i]*tau/pow(latticeSpacing,2)); if (q > 0.50f) throw InvalidArgException("D", "The specified diffusion coefficient is too high for the diffusion model."); T[i] = q; } // Setup the cuda reaction location matrix. uint8_t * tmpRL = new uint8_t[numberReactions*numberSiteTypes]; for (uint i=0; i<numberReactions*numberSiteTypes; i++) { tmpRL[i] = RL[i]; } // Copy the diffusion model to constant memory on the GPU. 
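// With MPD_GLOBAL_T_MATRIX defined, the transition table T lives in global memory (TG) and only
// its pointer is written to the TC symbol; otherwise T itself is copied into constant memory.
// RL is handled similarly below under MPD_GLOBAL_S_MATRIX.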
#ifdef MPD_GLOBAL_T_MATRIX CUDA_EXCEPTION_CHECK(cudaMalloc(&TG, DFmatrixSize*sizeof(float))); CUDA_EXCEPTION_CHECK(cudaMemcpy(TG, T, DFmatrixSize*sizeof(float), cudaMemcpyHostToDevice)); CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(TC, &TG, sizeof(float*))); #else CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(TC, T, DFmatrixSize*sizeof(float))); #endif CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(numberSpeciesC, &numberSpecies, sizeof(numberSpeciesC))); CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(numberSiteTypesC, &numberSiteTypes, sizeof(numberSiteTypesC))); const unsigned int latticeXYSize = latticeXSize*latticeYSize; const unsigned int latticeXYZSize = latticeXSize*latticeYSize*latticeZSize; CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(latticeXSizeC, &latticeXSize, sizeof(latticeYSizeC))); CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(latticeYSizeC, &latticeYSize, sizeof(latticeYSizeC))); CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(latticeZSizeC, &latticeZSize, sizeof(latticeZSizeC))); CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(global_latticeZSizeC, &latticeZSize, sizeof(latticeZSizeC))); CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(latticeXYSizeC, &latticeXYSize, sizeof(latticeXYSizeC))); CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(latticeXYZSizeC, &latticeXYZSize, sizeof(latticeXYZSizeC))); CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(global_latticeXYZSizeC, &latticeXYZSize, sizeof(latticeXYZSizeC))); #ifdef MPD_GLOBAL_S_MATRIX // Store RL in global memory too, since I'm going to assume if S is too big, then RL is too. cudaMalloc(&RLG, numberReactions*numberSiteTypes * sizeof(uint8_t)); cudaMemcpy(RLG, tmpRL, numberReactions*numberSiteTypes * sizeof(uint8_t), cudaMemcpyHostToDevice); #else // RL is stored in constant memory CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(RLC, tmpRL, numberReactions*numberSiteTypes*sizeof(uint8_t))); #endif delete [] tmpRL; delete [] T; // Set the cuda reaction model rates now that we have the subvolume size. 
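// The rates are converted to per-timestep propensity factors: zeroth-order rates are multiplied
// by tau and divided by the number of subvolumes, first-order rates by tau, and second-order
// rates by tau times the number of subvolumes.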
float * reactionRates = new float[numberReactions]; for (uint i=0; i<numberReactions; i++) { if (reactionTypes[i] == ZerothOrderPropensityArgs::REACTION_TYPE) { reactionRates[i] = ((ZerothOrderPropensityArgs *)propensityFunctionArgs[i])->k*tau/(latticeXSize*latticeYSize*latticeZSize); } else if (reactionTypes[i] == FirstOrderPropensityArgs::REACTION_TYPE) { reactionRates[i] = ((FirstOrderPropensityArgs *)propensityFunctionArgs[i])->k*tau; } else if (reactionTypes[i] == SecondOrderPropensityArgs::REACTION_TYPE) { reactionRates[i] = ((SecondOrderPropensityArgs *)propensityFunctionArgs[i])->k*tau*latticeXSize*latticeYSize*latticeZSize; } else if (reactionTypes[i] == SecondOrderSelfPropensityArgs::REACTION_TYPE) { reactionRates[i] = ((SecondOrderSelfPropensityArgs *)propensityFunctionArgs[i])->k*tau*latticeXSize*latticeYSize*latticeZSize; } else { throw InvalidArgException("reactionTypeA", "the reaction type was not supported by the solver", reactionTypes[i]); } } #ifdef MPD_GLOBAL_R_MATRIX cudaMalloc(&reactionRatesG, numberReactions*sizeof(float)); cudaMemcpy(reactionRatesG, reactionRates, numberReactions*sizeof(float), cudaMemcpyHostToDevice); #else CUDA_EXCEPTION_CHECK(cudaMemcpyToSymbol(reactionRatesC, reactionRates, numberReactions*sizeof(float))); #endif delete [] reactionRates; // set up pre-configured propensity matricies size_t zeroOrderSize=numberSiteTypes; size_t firstOrderSize=numberSpecies*numberSiteTypes; size_t secondOrderSize=numberSpecies*numberSpecies*numberSiteTypes; float *zeroOrder=new float[zeroOrderSize]; float *firstOrder=new float[firstOrderSize]; float *secondOrder=new float[secondOrderSize]; float scale=latticeXSize*latticeYSize*latticeZSize; for(uint site=0; site<numberSiteTypes; site++) { uint o1=site*numberSpecies; uint o2=site*numberSpecies*numberSpecies; zeroOrder[site]=0.0f; for (uint i=0; i<numberSpecies; i++) { firstOrder[o1 + i]=0.0f; for (uint j=0; j<numberSpecies; j++) secondOrder[o2 + i*numberSpecies + j]=0.0f; } for (uint i=0; i<numberReactions; i++) { if(! 
RL[i*numberSiteTypes + site]) continue; switch(reactionTypes[i]) { case ZerothOrderPropensityArgs::REACTION_TYPE: { ZerothOrderPropensityArgs *rx=(ZerothOrderPropensityArgs *)propensityFunctionArgs[i]; zeroOrder[site]+=rx->k*tau/scale; } break; case FirstOrderPropensityArgs::REACTION_TYPE: { FirstOrderPropensityArgs *rx=(FirstOrderPropensityArgs *)propensityFunctionArgs[i]; firstOrder[o1 + rx->si]+=rx->k*tau; } break; case SecondOrderPropensityArgs::REACTION_TYPE: { SecondOrderPropensityArgs *rx=(SecondOrderPropensityArgs *)propensityFunctionArgs[i]; secondOrder[o2 + (rx->s1i) * numberSpecies + (rx->s2i)]=rx->k*tau*scale; secondOrder[o2 + (rx->s2i) * numberSpecies + (rx->s1i)]=rx->k*tau*scale; } break; case SecondOrderSelfPropensityArgs::REACTION_TYPE: { SecondOrderSelfPropensityArgs *rx=(SecondOrderSelfPropensityArgs *)propensityFunctionArgs[i]; secondOrder[o2 + (rx->si) * numberSpecies + (rx->si)]+=rx->k*tau*scale*2; } } } } cudaMalloc(&propZeroOrder, zeroOrderSize*sizeof(float)); cudaMemcpy(propZeroOrder, zeroOrder, zeroOrderSize*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc(&propFirstOrder, firstOrderSize*sizeof(float)); cudaMemcpy(propFirstOrder, firstOrder, firstOrderSize*sizeof(float), cudaMemcpyHostToDevice); cudaMalloc(&propSecondOrder, secondOrderSize*sizeof(float)); cudaMemcpy(propSecondOrder, secondOrder, secondOrderSize*sizeof(float), cudaMemcpyHostToDevice); delete[] zeroOrder; delete[] firstOrder; delete[] secondOrder; } void MpdRdmeSolver::generateTrajectory() { // Shadow the lattice member as a cuda lattice. CudaByteLattice * lattice = (CudaByteLattice *)this->lattice; // Get the interval for writing species counts and lattices. double speciesCountsWriteInterval=atof((*parameters)["writeInterval"].c_str()); double nextSpeciesCountsWriteTime = speciesCountsWriteInterval; lm::io::SpeciesCounts speciesCountsDataSet; speciesCountsDataSet.set_number_species(numberSpeciesToTrack); speciesCountsDataSet.set_number_entries(0); double latticeWriteInterval=atof((*parameters)["latticeWriteInterval"].c_str()); double nextLatticeWriteTime = latticeWriteInterval; lm::io::Lattice latticeDataSet; // Get the simulation time limit. double maxTime=atof((*parameters)["maxTime"].c_str()); Print::printf(Print::INFO, "Running mpd rdme simulation with %d species, %d reactions, %d site types for %e s with tau %e. Writing species at %e and lattice at %e intervals", numberSpecies, numberReactions, numberSiteTypes, maxTime, tau, speciesCountsWriteInterval, latticeWriteInterval); // Set the initial time. double time = 0.0; double base_time = 0.0; uint32_t timestep=1; uint32_t complete_steps=0; // Simulation Hook interface bool hookEnabled=false; double nextHookTime=0.0; double hookInterval=0.0; if((*parameters)["hookInterval"] != "") { hookInterval=atof((*parameters)["hookInterval"].c_str()); hookEnabled=hookInterval >= 0.0; nextHookTime=hookInterval; } // Find out at what interval to write status messages double printPerfInterval = 60; if((*parameters)["perfPrintInterval"] != "") { printPerfInterval=atof((*parameters)["perfPrintInterval"].c_str()); } // Call beginning hook. Any modifications to the lattice will be // accounted for because we have not yet copied to device memory // thus we do not need to check return value onBeginTrajectory(lattice); // Synchronize the cuda memory. lattice->copyToGPU(); // Record the initial species counts. recordSpeciesCounts(time, lattice, &speciesCountsDataSet); // Write the initial lattice. 
writeLatticeData(time, lattice, &latticeDataSet); // initialize max counts from initial particle/site lattice initMaxCounts(lattice); Timer timer; timer.tick(); double lastT=0; int lastSteps=0; while (time < maxTime) { if(globalAbort) { printf("Global abort: terminating solver\n"); break; } lastT += timer.tock(); lastSteps += 1; if ( lastT >= printPerfInterval) { double stepTime = lastT/lastSteps; double completionTime = stepTime*(maxTime-time)/tau; std::string units; if (completionTime > 60*60*24*365) { units = "weeks"; completionTime /= 60*60*24*365; } else if (completionTime > 60*60*24*30) { units = "months"; completionTime /= 60*60*24*30; } else if (completionTime > 60*60*24*7) { units = "weeks"; completionTime /= 60*60*24*7; } else if (completionTime > 60*60*24) { units = "days"; completionTime /= 60*60*24; } else if (completionTime > 60*60) { units = "hours"; completionTime /= 60*60; } else if (completionTime > 60) { units = "minutes"; completionTime /= 60; } else { units = "seconds"; } Print::printf(Print::INFO, "Average walltime per timestep: %.2f ms. Progress: %.4fs/%.4fs (% .3g%% done / %.2g %s walltime remaining)", 1000.0*stepTime, time, maxTime, 100.0*time/maxTime, completionTime, units.c_str()); lastT = 0; lastSteps = 0; } timer.tick(); // Run the next timestep. runTimestep(lattice, timestep++); complete_steps++; // Update the time. time = base_time + complete_steps*tau; // Check if we need to execute the hook if (hookEnabled && time >= nextHookTime-EPS) { lattice->copyFromGPU(); switch(hookSimulation(time, lattice)) { case 0: break; case 1: lattice->copyToGPU(); break; case 2: lattice->copyToGPU(); writeLatticeSites(time, lattice); break; default: throw("Unknown hook return value"); } nextHookTime += hookInterval; } // See if we need to write out the any data. if (time >= nextLatticeWriteTime-EPS || time >= nextSpeciesCountsWriteTime-EPS) { // rebase time to match how mgpu does it base_time = base_time + complete_steps*tau; complete_steps=0; // Synchronize the lattice. lattice->copyFromGPU(); // See if we need to write the lattice. if (time >= nextLatticeWriteTime-EPS) { PROF_BEGIN(PROF_SERIALIZE_LATTICE); writeLatticeData(time, lattice, &latticeDataSet); nextLatticeWriteTime += latticeWriteInterval; PROF_END(PROF_SERIALIZE_LATTICE); switch(onWriteLattice(time, lattice)) { case 0: break; case 1: lattice->copyToGPU(); break; case 2: lattice->copyToGPU(); writeLatticeSites(time, lattice); break; default: throw("Unknown hook return value"); } } // See if we need to write the species counts. if (time >= nextSpeciesCountsWriteTime-EPS) { PROF_BEGIN(PROF_DETERMINE_COUNTS); recordSpeciesCounts(time, lattice, &speciesCountsDataSet); nextSpeciesCountsWriteTime += speciesCountsWriteInterval; PROF_END(PROF_DETERMINE_COUNTS); // See if we have accumulated enough species counts to send. if (speciesCountsDataSet.number_entries() >= TUNE_SPECIES_COUNTS_BUFFER_SIZE) { PROF_BEGIN(PROF_SERIALIZE_COUNTS); writeSpeciesCounts(&speciesCountsDataSet); PROF_END(PROF_SERIALIZE_COUNTS); } } } } lattice->copyFromGPU(); onEndTrajectory(lattice); // Write any remaining species counts. writeSpeciesCounts(&speciesCountsDataSet); writeMaxCounts(); } // Do not do anything in this class; assume derivative classes will override int MpdRdmeSolver::hookSimulation(double time, CudaByteLattice *lattice) { // Overload this function in derivative classes // Return 0 if the lattice state is unchanged // Return 1 if the lattice state has been modified, // and it needs to be copied back to the GPU. 
// Return 2 if lattice sites have changed, and should // be copied back to the GPU *and* be recorded // in the output file return 0; } int MpdRdmeSolver::onWriteLattice(double time, CudaByteLattice *lattice) { // Overload this function in derivative classes // Return 0 if the lattice state is unchanged // Return 1 if the lattice state has been modified, // and it needs to be copied back to the GPU. // Return 2 if lattice sites have changed, and should // be copied back to the GPU *and* be recorded // in the output file return 0; } int MpdRdmeSolver::onBeginTrajectory(CudaByteLattice *lattice) { return 0; } int MpdRdmeSolver::onEndTrajectory(CudaByteLattice *lattice) { return 0; } void MpdRdmeSolver::initMaxCounts(CudaByteLattice *lattice) { uint8_t *sites = reinterpret_cast<uint8_t*>(lattice->getSitesMemory()); const size_t ns = lattice->getNumberSites(); std::fill(currentMaxSiteCounts.begin(), currentMaxSiteCounts.end(), 0); for (size_t i=0; i < ns; i++) { if (sites[i] > 0) { assert(sites[i] < currentMaxSiteCounts.size()); currentMaxSiteCounts[sites[i]]++; } } for (int i=0; i < maxSiteCounts.size(); i++) { maxSiteCounts[i] = std::max(maxSiteCounts[i], currentMaxSiteCounts[i]); } uint8_t *particles = reinterpret_cast<uint8_t*>(lattice->getParticlesMemory()); const size_t np = MPD_LATTICE_MAX_OCCUPANCY * lattice->getNumberSites(); std::fill(currentMaxParticleCounts.begin(), currentMaxParticleCounts.end(), 0); for (size_t i=0; i < np; i++) { if (particles[i] > 0) { currentMaxParticleCounts[particles[i]]++; } } for (int i=0; i < maxParticleCounts.size(); i++) { maxParticleCounts[i] = std::max(maxParticleCounts[i], currentMaxParticleCounts[i]); } } void MpdRdmeSolver::writeLatticeData(double time, CudaByteLattice * lattice, lm::io::Lattice * latticeDataSet) { Print::printf(Print::DEBUG, "Writing lattice at %e s", time); // Record the lattice data. latticeDataSet->Clear(); latticeDataSet->set_lattice_x_size(lattice->getSize().x); latticeDataSet->set_lattice_y_size(lattice->getSize().y); latticeDataSet->set_lattice_z_size(lattice->getSize().z); latticeDataSet->set_particles_per_site(lattice->getMaxOccupancy()); latticeDataSet->set_time(time); // update max counts uint8_t *particles = reinterpret_cast<uint8_t*>(lattice->getParticlesMemory()); const size_t np = MPD_LATTICE_MAX_OCCUPANCY * lattice->getNumberSites(); std::fill(currentMaxParticleCounts.begin(), currentMaxParticleCounts.end(), 0); for (size_t i=0; i < np; i++) { if (particles[i] > 0) { currentMaxParticleCounts[particles[i]]++; } } for (int i=0; i < maxParticleCounts.size(); i++) { maxParticleCounts[i] = std::max(maxParticleCounts[i], currentMaxParticleCounts[i]); } // Push it to the output queue. size_t payloadSize = size_t(lattice->getSize().x)*size_t(lattice->getSize().y)*size_t(lattice->getSize().z)*size_t(lattice->getMaxOccupancy())*sizeof(uint8_t); lm::main::DataOutputQueue::getInstance()->pushDataSet(lm::main::DataOutputQueue::BYTE_LATTICE, replicate, latticeDataSet, lattice, payloadSize, &lm::rdme::ByteLattice::nativeSerialize); } void MpdRdmeSolver::writeLatticeSites(double time, CudaByteLattice * lattice) { Print::printf(Print::DEBUG, "Writing lattice sites at %e s", time); lm::io::Lattice latticeDataSet; // Record the lattice data. 
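// Site-type data is one byte per lattice site, so the payload pushed below is
// latticeX*latticeY*latticeZ bytes (no particles-per-site factor, unlike the particle
// lattice written in writeLatticeData).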
latticeDataSet.Clear(); latticeDataSet.set_lattice_x_size(lattice->getSize().x); latticeDataSet.set_lattice_y_size(lattice->getSize().y); latticeDataSet.set_lattice_z_size(lattice->getSize().z); latticeDataSet.set_particles_per_site(lattice->getMaxOccupancy()); latticeDataSet.set_time(time); // update max counts uint8_t *sites = reinterpret_cast<uint8_t*>(lattice->getSitesMemory()); const size_t ns = lattice->getNumberSites(); std::fill(currentMaxSiteCounts.begin(), currentMaxSiteCounts.end(), 0); for (size_t i=0; i < ns; i++) { if (sites[i] > 0) { assert(sites[i] < currentMaxSiteCounts.size()); currentMaxSiteCounts[sites[i]]++; } } for (int i=0; i < maxSiteCounts.size(); i++) { maxSiteCounts[i] = std::max(maxSiteCounts[i], currentMaxSiteCounts[i]); } // Push it to the output queue. size_t payloadSize = lattice->getSize().x*lattice->getSize().y*lattice->getSize().z*sizeof(uint8_t); lm::main::DataOutputQueue::getInstance()->pushDataSet(lm::main::DataOutputQueue::SITE_LATTICE, replicate, &latticeDataSet, lattice, payloadSize, &lm::rdme::ByteLattice::nativeSerializeSites); } void MpdRdmeSolver::writeMaxCounts() { std::vector<size_t> shape_p(1); shape_p[0] = maxParticleCounts.size(); H5MetaData md_p = make_H5_meta<uint32_t>(H5MetaData::NEW_DATASET, shape_p, "MaxParticleCounts"); lm::main::DataOutputQueue::getInstance()->pushDataSet(md_p, replicate, &(maxParticleCounts[0])); std::vector<size_t> shape_s(1); shape_s[0] = maxSiteCounts.size(); H5MetaData md_s = make_H5_meta<uint32_t>(H5MetaData::NEW_DATASET, shape_s, "MaxSiteCounts"); lm::main::DataOutputQueue::getInstance()->pushDataSet(md_s, replicate, &(maxSiteCounts[0])); } void MpdRdmeSolver::recordSpeciesCounts(double time, CudaByteLattice * lattice, lm::io::SpeciesCounts * speciesCountsDataSet) { std::map<particle_t,uint> particleCounts = lattice->getParticleCounts(); speciesCountsDataSet->set_number_entries(speciesCountsDataSet->number_entries()+1); speciesCountsDataSet->add_time(time); for (particle_t p=0; p<numberSpeciesToTrack; p++) { speciesCountsDataSet->add_species_count((particleCounts.count(p+1)>0)?particleCounts[p+1]:0); } } void MpdRdmeSolver::writeSpeciesCounts(lm::io::SpeciesCounts * speciesCountsDataSet) { if (speciesCountsDataSet->number_entries() > 0) { // Push it to the output queue. lm::main::DataOutputQueue::getInstance()->pushDataSet(lm::main::DataOutputQueue::SPECIES_COUNTS, replicate, speciesCountsDataSet); // Reset the data set. speciesCountsDataSet->Clear(); speciesCountsDataSet->set_number_species(numberSpeciesToTrack); speciesCountsDataSet->set_number_entries(0); } } uint64_t MpdRdmeSolver::getTimestepSeed(uint32_t timestep, uint32_t substep) { uint64_t timestepHash = (((((uint64_t)seed)<<30)+timestep)<<2)+substep; timestepHash = timestepHash * 3202034522624059733ULL + 4354685564936845319ULL; timestepHash ^= timestepHash >> 20; timestepHash ^= timestepHash << 41; timestepHash ^= timestepHash >> 5; timestepHash *= 7664345821815920749ULL; return timestepHash; } void MpdRdmeSolver::runTimestep(CudaByteLattice * lattice, uint32_t timestep) { PROF_BEGIN(PROF_MPD_TIMESTEP); // Calculate some properties of the lattice. lattice_coord_t size = lattice->getSize(); const unsigned int latticeXSize = size.x; const unsigned int latticeYSize = size.y; const unsigned int latticeZSize = size.z; dim3 gridSize, threadBlockSize; // Execute the kernel for the x direction. 
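    // Each timestep is operator-split into three diffusion sweeps (X, then Y, then Z).
    // Every sweep reads the current source buffer, writes into the destination buffer and is
    // followed by swapSrcDest(), so the two lattice buffers ping-pong between sweeps; the
    // reaction kernel afterwards updates the lattice in place. Each stage draws its
    // randomness from a distinct substep seed, getTimestepSeed(timestep, 0..3).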
PROF_CUDA_START(cudaStream); PROF_CUDA_BEGIN(PROF_MPD_X_DIFFUSION,cudaStream); #ifdef MPD_CUDA_3D_GRID_LAUNCH calculateXLaunchParameters(&gridSize, &threadBlockSize, TUNE_MPD_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeYSize, latticeZSize); CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::mpd_x_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), getTimestepSeed(timestep,0), (unsigned int*)cudaOverflowList))); #else unsigned int gridXSize; calculateXLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, TUNE_MPD_X_BLOCK_MAX_X_SIZE, latticeXSize, latticeYSize, latticeZSize); CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::mpd_x_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), gridXSize, getTimestepSeed(timestep,0), (unsigned int*)cudaOverflowList))); #endif PROF_CUDA_END(PROF_MPD_X_DIFFUSION,cudaStream); lattice->swapSrcDest(); // Execute the kernel for the y direction. PROF_CUDA_BEGIN(PROF_MPD_Y_DIFFUSION,cudaStream); #ifdef MPD_CUDA_3D_GRID_LAUNCH calculateYLaunchParameters(&gridSize, &threadBlockSize, TUNE_MPD_Y_BLOCK_X_SIZE, TUNE_MPD_Y_BLOCK_Y_SIZE, latticeXSize, latticeYSize, latticeZSize); CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::mpd_y_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), getTimestepSeed(timestep,1), (unsigned int*)cudaOverflowList))); #else calculateYLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, TUNE_MPD_Y_BLOCK_X_SIZE, TUNE_MPD_Y_BLOCK_Y_SIZE, latticeXSize, latticeYSize, latticeZSize); CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::mpd_y_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), gridXSize, getTimestepSeed(timestep,1), (unsigned int*)cudaOverflowList))); #endif PROF_CUDA_END(PROF_MPD_Y_DIFFUSION,cudaStream); lattice->swapSrcDest(); // Execute the kernel for the z direction. PROF_CUDA_BEGIN(PROF_MPD_Z_DIFFUSION,cudaStream); #ifdef MPD_CUDA_3D_GRID_LAUNCH calculateZLaunchParameters(&gridSize, &threadBlockSize, TUNE_MPD_Z_BLOCK_X_SIZE, TUNE_MPD_Z_BLOCK_Z_SIZE, latticeXSize, latticeYSize, latticeZSize); CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::mpd_z_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), getTimestepSeed(timestep,2), (unsigned int*)cudaOverflowList))); #else calculateZLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, TUNE_MPD_Z_BLOCK_X_SIZE, TUNE_MPD_Z_BLOCK_Z_SIZE, latticeXSize, latticeYSize, latticeZSize); CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::mpd_z_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemoryDest(), gridXSize, getTimestepSeed(timestep,2), (unsigned int*)cudaOverflowList))); #endif PROF_CUDA_END(PROF_MPD_Z_DIFFUSION,cudaStream); lattice->swapSrcDest(); if (numberReactions > 0) { // Execute the kernel for the reaction, this kernel updates the lattice in-place, so only the src pointer is passed. 
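        // Which reaction kernel gets launched depends on compile-time options: with
        // MPD_GLOBAL_S_MATRIX the S and RL matrices (SG, RLG) are passed as device-memory
        // pointers; MPD_GLOBAL_R_MATRIX additionally passes reactionOrdersG, reactionSitesG,
        // D1G, D2G and reactionRatesG; MPD_FREAKYFAST switches to precomp_reaction_kernel,
        // which also takes the precomputed propensity tables propZeroOrder, propFirstOrder
        // and propSecondOrder.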
PROF_CUDA_BEGIN(PROF_MPD_REACTION,cudaStream); #ifdef MPD_CUDA_3D_GRID_LAUNCH calculateReactionLaunchParameters(&gridSize, &threadBlockSize, TUNE_MPD_REACTION_BLOCK_X_SIZE, TUNE_MPD_REACTION_BLOCK_Y_SIZE, latticeXSize, latticeYSize, latticeZSize); #ifdef MPD_GLOBAL_S_MATRIX #ifdef MPD_GLOBAL_R_MATRIX #ifdef MPD_FREAKYFAST CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::precomp_reaction_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemorySrc(), getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList, SG, RLG, reactionOrdersG, reactionSitesG, D1G, D2G, reactionRatesG, propZeroOrder, propFirstOrder, propSecondOrder))); #else CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::reaction_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemorySrc(), getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList, SG, RLG, reactionOrdersG, reactionSitesG, D1G, D2G, reactionRatesG))); #endif #else #ifdef MPD_FREAKYFAST CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::precomp_reaction_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemorySrc(), getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList, SG, RLG, propZeroOrder, propFirstOrder, propSecondOrder))); #else CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::reaction_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemorySrc(), getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList, SG, RLG))); #endif #endif #else CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::reaction_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemorySrc(), getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList))); #endif #else calculateReactionLaunchParameters(&gridXSize, &gridSize, &threadBlockSize, TUNE_MPD_REACTION_BLOCK_X_SIZE, TUNE_MPD_REACTION_BLOCK_Y_SIZE, latticeXSize, latticeYSize, latticeZSize); #ifdef MPD_GLOBAL_S_MATRIX CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::reaction_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemorySrc(), gridXSize, getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList, SG, RLG))); #else CUDA_EXCEPTION_EXECUTE((mpdrdme_dev::reaction_kernel<<<gridSize,threadBlockSize,0,cudaStream>>>((unsigned int *)lattice->getGPUMemorySrc(), (uint8_t *)lattice->getGPUMemorySiteTypes(), (unsigned int *)lattice->getGPUMemorySrc(), gridXSize, getTimestepSeed(timestep,3), (unsigned int*)cudaOverflowList))); #endif #endif PROF_CUDA_END(PROF_MPD_REACTION,cudaStream); } if(overflow_handling == OVERFLOW_MODE_RELAXED) mpdrdme_dev::correct_overflows<<<dim3(1,1,1), dim3(TUNE_MPD_MAX_PARTICLE_OVERFLOWS,1,1),0,cudaStream>>>((unsigned int*)lattice->getGPUMemorySrc(), (unsigned int*)cudaOverflowList); // Wait for the kernels to complete. 
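    // Block on cudaStream so the diffusion, reaction and overflow-correction kernels have all
    // finished before the overflow list is inspected on the host below.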
PROF_BEGIN(PROF_MPD_SYNCHRONIZE); CUDA_EXCEPTION_CHECK(cudaStreamSynchronize(cudaStream)); PROF_END(PROF_MPD_SYNCHRONIZE); if(overflow_handling == OVERFLOW_MODE_RELAXED) { uint unresolved = ((unsigned int*)cudaOverflowList)[0]; if (unresolved > 0) { Print::printf(Print::WARNING, "%d unresolved overflows", unresolved); } } if(overflow_handling == OVERFLOW_MODE_CLASSIC) { // Handle any particle overflows. PROF_BEGIN(PROF_MPD_OVERFLOW); overflowTimesteps++; #ifndef MPD_MAPPED_OVERFLOWS uint32_t overflowList[1+2*TUNE_MPD_MAX_PARTICLE_OVERFLOWS]; CUDA_EXCEPTION_CHECK(cudaMemcpy(overflowList, cudaOverflowList, MPD_OVERFLOW_LIST_SIZE, cudaMemcpyDeviceToHost)); #else uint32_t *overflowList = (uint32_t*)cudaOverflowList; #endif uint numberExceptions = overflowList[0]; if (numberExceptions > 0) { Print::printf(Print::DEBUG, "%d overflows", numberExceptions); // Make sure we did not exceed the overflow buffer. if (numberExceptions > TUNE_MPD_MAX_PARTICLE_OVERFLOWS) throw Exception("Too many particle overflows for the available buffer", numberExceptions); // Synchronize the lattice. lattice->copyFromGPU(); // Go through each exception. for (uint i=0; i<numberExceptions; i++) { // Extract the index and particle type. lattice_size_t latticeIndex = overflowList[(i*2)+1]; particle_t particle = overflowList[(i*2)+2]; // Get the x, y, and z coordiantes. lattice_size_t x = latticeIndex%lattice->getXSize(); lattice_size_t y = (latticeIndex/lattice->getXSize())%lattice->getYSize(); lattice_size_t z = latticeIndex/(lattice->getXSize()*lattice->getYSize()); // Put the particles back into a nearby lattice site. bool replacedParticle = false; for (uint searchRadius=0; !replacedParticle && searchRadius <= TUNE_MPD_MAX_OVERFLOW_REPLACEMENT_DIST; searchRadius++) { // Get the nearby sites. std::vector<lattice_coord_t> sites = lattice->getNearbySites(x,y,z,(searchRadius>0)?searchRadius-1:0,searchRadius); // TODO: Shuffle the sites. // Try to find one that in not fully occupied and of the same type. for (std::vector<lattice_coord_t>::iterator it=sites.begin(); it<sites.end(); it++) { lattice_coord_t site = *it; if (lattice->getOccupancy(site.x,site.y,site.z) < lattice->getMaxOccupancy() && lattice->getSiteType(site.x,site.y,site.z) == lattice->getSiteType(x,y,z)) { lattice->addParticle(site.x, site.y, site.z, particle); replacedParticle = true; Print::printf(Print::VERBOSE_DEBUG, "Handled overflow of particle %d at site %d,%d,%d type=%d occ=%d by placing at site %d,%d,%d type=%d newocc=%d dist=%0.2f", particle, x, y, z, lattice->getSiteType(x,y,z), lattice->getOccupancy(x,y,z), site.x, site.y, site.z, lattice->getSiteType(site.x,site.y,site.z), lattice->getOccupancy(site.x,site.y,site.z), sqrt(pow((double)x-(double)site.x,2.0)+pow((double)y-(double)site.y,2.0)+pow((double)z-(double)site.z,2.0))); break; } } } // If we were not able to fix the exception, throw an error. if (!replacedParticle) throw Exception("Unable to find an available site to handle a particle overflow."); } // Copy the changes back to the GPU. lattice->copyToGPU(); // Reset the overflow list. CUDA_EXCEPTION_CHECK(cudaMemset(cudaOverflowList, 0, MPD_OVERFLOW_LIST_SIZE)); // Track that we used the overflow list. overflowListUses++; } // If the overflow lsit is being used too often, print a warning. 
if (overflowTimesteps >= 1000) { if (overflowListUses > 10) Print::printf(Print::WARNING, "%d uses of the particle overflow list in the last 1000 timesteps, performance may be degraded.", overflowListUses); overflowTimesteps = 0; overflowListUses = 0; } PROF_END(PROF_MPD_OVERFLOW); } // End classic overflow PROF_CUDA_FINISH(cudaStream); PROF_END(PROF_MPD_TIMESTEP); } #ifdef MPD_CUDA_3D_GRID_LAUNCH /** * Gets the launch parameters for launching an x diffusion kernel. */ void MpdRdmeSolver::calculateXLaunchParameters(dim3 * gridSize, dim3 * threadBlockSize, const unsigned int maxXBlockSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize) { unsigned int xBlockXSize = min(maxXBlockSize,latticeXSize); unsigned int gridXSize = latticeXSize/xBlockXSize; if (gridXSize*xBlockXSize != latticeXSize) { // Find the largest number of warps that is divisible unsigned int tryx=32; while(tryx < maxXBlockSize) { if (latticeXSize % tryx == 0) xBlockXSize = tryx; tryx +=32; } gridXSize = latticeXSize/xBlockXSize; } (*gridSize).x = gridXSize; (*gridSize).y = latticeYSize; (*gridSize).z = latticeZSize; (*threadBlockSize).x = xBlockXSize; (*threadBlockSize).y = 1; (*threadBlockSize).z = 1; } /** * Gets the launch parameters for launching a y diffusion kernel. */ void MpdRdmeSolver::calculateYLaunchParameters(dim3 * gridSize, dim3 * threadBlockSize, const unsigned int blockXSize, const unsigned int blockYSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize) { (*gridSize).x = latticeXSize/blockXSize; (*gridSize).y = latticeYSize/blockYSize; (*gridSize).z = latticeZSize; (*threadBlockSize).x = blockXSize; (*threadBlockSize).y = blockYSize; (*threadBlockSize).z = 1; } /** * Gets the launch parameters for launching a z diffusion kernel. */ void MpdRdmeSolver::calculateZLaunchParameters(dim3 * gridSize, dim3 * threadBlockSize, const unsigned int blockXSize, const unsigned int blockZSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize) { (*gridSize).x = latticeXSize/blockXSize; (*gridSize).y = latticeYSize; (*gridSize).z = latticeZSize/blockZSize; (*threadBlockSize).x = blockXSize; (*threadBlockSize).y = 1; (*threadBlockSize).z = blockZSize; } /** * Gets the launch parameters for launching a y diffusion kernel. */ void MpdRdmeSolver::calculateReactionLaunchParameters(dim3 * gridSize, dim3 * threadBlockSize, const unsigned int blockXSize, const unsigned int blockYSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize) { (*gridSize).x = latticeXSize/blockXSize; (*gridSize).y = latticeYSize/blockYSize; (*gridSize).z = latticeZSize; (*threadBlockSize).x = blockXSize; (*threadBlockSize).y = blockYSize; (*threadBlockSize).z = 1; } #else /** * Gets the launch parameters for launching an x diffusion kernel. 
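 * In this variant the launch grid is only 2-D: the number of X blocks is returned through
 * *gridXSize, the X and Y block indices are folded into gridDim.x (and Z into gridDim.y),
 * and the kernels recover (bx,by,bz) with calculateBlockPosition() from the same gridXSize.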
 */
void MpdRdmeSolver::calculateXLaunchParameters(unsigned int * gridXSize, dim3 * gridSize, dim3 * threadBlockSize, const unsigned int maxXBlockSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize)
{
    unsigned int xBlockXSize = min(maxXBlockSize,latticeXSize);
    *gridXSize = latticeXSize/xBlockXSize;
    if ((*gridXSize)*xBlockXSize != latticeXSize)
    {
        // Find the largest number of warps that is divisible
        unsigned int tryx=32;
        while(tryx < maxXBlockSize)
        {
            if (latticeXSize % tryx == 0)
                xBlockXSize = tryx;
            tryx +=32;
        }
        *gridXSize = latticeXSize/xBlockXSize;
    }

    (*gridSize).x = (*gridXSize)*latticeYSize;
    (*gridSize).y = latticeZSize;
    (*gridSize).z = 1;
    (*threadBlockSize).x = xBlockXSize;
    (*threadBlockSize).y = 1;
    (*threadBlockSize).z = 1;
}

/**
 * Gets the launch parameters for launching a y diffusion kernel.
 */
void MpdRdmeSolver::calculateYLaunchParameters(unsigned int * gridXSize, dim3 * gridSize, dim3 * threadBlockSize, const unsigned int blockXSize, const unsigned int blockYSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize)
{
    *gridXSize = latticeXSize/blockXSize;
    (*gridSize).x = (*gridXSize)*(latticeYSize/blockYSize);
    (*gridSize).y = latticeZSize;
    (*gridSize).z = 1;
    (*threadBlockSize).x = blockXSize;
    (*threadBlockSize).y = blockYSize;
    (*threadBlockSize).z = 1;
}

/**
 * Gets the launch parameters for launching a z diffusion kernel.
 */
void MpdRdmeSolver::calculateZLaunchParameters(unsigned int * gridXSize, dim3 * gridSize, dim3 * threadBlockSize, const unsigned int blockXSize, const unsigned int blockZSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize)
{
    *gridXSize = latticeXSize/blockXSize;
    (*gridSize).x = (*gridXSize)*(latticeYSize);
    (*gridSize).y = latticeZSize/blockZSize;
    (*gridSize).z = 1;
    (*threadBlockSize).x = blockXSize;
    (*threadBlockSize).y = 1;
    (*threadBlockSize).z = blockZSize;
}

/**
 * Gets the launch parameters for launching a reaction kernel.
 */
void MpdRdmeSolver::calculateReactionLaunchParameters(unsigned int * gridXSize, dim3 * gridSize, dim3 * threadBlockSize, const unsigned int blockXSize, const unsigned int blockYSize, const unsigned int latticeXSize, const unsigned int latticeYSize, const unsigned int latticeZSize)
{
    *gridXSize = latticeXSize/blockXSize;
    (*gridSize).x = (*gridXSize)*(latticeYSize/blockYSize);
    (*gridSize).y = latticeZSize;
    (*gridSize).z = 1;
    (*threadBlockSize).x = blockXSize;
    (*threadBlockSize).y = blockYSize;
    (*threadBlockSize).z = 1;
}
#endif

namespace mpdrdme_dev {
/**
 * Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice
 * site, storing the new lattice into shared memory, and then updating the global lattice.
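 *
 * The shared-memory window carries an apron of MPD_APRON_SIZE sites on either side of the
 * thread block along the propagation axis, so moves that cross the block boundary can be
 * resolved from shared memory without re-reading global memory.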
*/ #ifdef MPD_CUDA_3D_GRID_LAUNCH __global__ void __launch_bounds__(TUNE_MPD_X_BLOCK_MAX_X_SIZE,1) mpd_x_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList) { unsigned int bx=blockIdx.x, by=blockIdx.y, bz=blockIdx.z; #else __global__ void __launch_bounds__(TUNE_MPD_X_BLOCK_MAX_X_SIZE,1) mpd_x_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned int gridXSize, const unsigned long long timestepHash, unsigned int* siteOverflowList) { __shared__ unsigned int bx, by, bz; calculateBlockPosition(&bx, &by, &bz, gridXSize); #endif // Figure out the offset of this thread in the lattice and the lattice segment. unsigned int latticeXIndex = (bx*blockDim.x) + threadIdx.x; unsigned int latticeIndex = (bz*latticeXYSizeC) + (by*latticeXSizeC) + latticeXIndex; unsigned int windowIndex = threadIdx.x+MPD_APRON_SIZE; /////////////////////////////////////////// // Load the lattice into shared memory. // /////////////////////////////////////////// // Shared memory to store the lattice segment. __shared__ unsigned int window[MPD_X_WINDOW_SIZE*MPD_WORDS_PER_SITE]; __shared__ uint8_t sitesWindow[MPD_X_WINDOW_SIZE]; // Copy the x window from device memory into shared memory. copyXWindowFromLattice(bx, inLattice, window, latticeIndex, latticeXIndex, windowIndex); copyXWindowFromSites(bx, inSites, sitesWindow, latticeIndex, latticeXIndex, windowIndex); __syncthreads(); //////////////////////////////////////// // Make the choice for each particle. // //////////////////////////////////////// __shared__ unsigned int choices[MPD_X_WINDOW_SIZE*MPD_WORDS_PER_SITE]; // Make the choices. makeXDiffusionChoices(window, sitesWindow, choices, latticeIndex, latticeXIndex, windowIndex, blockDim.x, timestepHash); __syncthreads(); ////////////////////////////////////////////////////////// // Create version of the lattice at the next time step. // ////////////////////////////////////////////////////////// // Propagate the choices to the new lattice segment. performPropagation(outLattice, window, choices, latticeIndex, windowIndex-1, windowIndex, windowIndex+1, MPD_X_WINDOW_SIZE, siteOverflowList); } /** * Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice * site, storing the new lattice into shared memory, and then updating the global lattice. */ #ifdef MPD_CUDA_3D_GRID_LAUNCH __global__ void __launch_bounds__(TUNE_MPD_Y_BLOCK_X_SIZE*TUNE_MPD_Y_BLOCK_Y_SIZE,1) mpd_y_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList) { unsigned int bx=blockIdx.x, by=blockIdx.y, bz=blockIdx.z; #else __global__ void __launch_bounds__(TUNE_MPD_Y_BLOCK_X_SIZE*TUNE_MPD_Y_BLOCK_Y_SIZE,1) mpd_y_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned int gridXSize, const unsigned long long timestepHash, unsigned int* siteOverflowList) { __shared__ unsigned int bx, by, bz; calculateBlockPosition(&bx, &by, &bz, gridXSize); #endif // Figure out the offset of this thread in the lattice and the lattice segment. 
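    // For the Y sweep each block covers a blockDim.x x blockDim.y tile of a single XY plane:
    // latticeIndex = bz*latticeXYSizeC + latticeYIndex*latticeXSizeC + bx*blockDim.x + threadIdx.x,
    // while windowIndex addresses the same site inside the shared-memory window, shifted by
    // the apron in Y.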
unsigned int latticeYIndex = (by*blockDim.y) + threadIdx.y; unsigned int latticeIndex = (bz*latticeXYSizeC) + (latticeYIndex*latticeXSizeC) + (bx*blockDim.x) + threadIdx.x; unsigned int windowYIndex = threadIdx.y+MPD_APRON_SIZE; unsigned int windowIndex = (windowYIndex*blockDim.x) + threadIdx.x; /////////////////////////////////////////// // Load the lattice into shared memory. // /////////////////////////////////////////// // Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle. __shared__ unsigned int window[MPD_Y_WINDOW_SIZE*MPD_WORDS_PER_SITE]; __shared__ uint8_t sitesWindow[MPD_Y_WINDOW_SIZE]; // Copy the x window from device memory into shared memory. copyYWindowFromLattice(inLattice, window, latticeIndex, latticeYIndex, windowIndex, windowYIndex); copyYWindowFromSites(inSites, sitesWindow, latticeIndex, latticeYIndex, windowIndex, windowYIndex); __syncthreads(); //////////////////////////////////////// // Make the choice for each particle. // //////////////////////////////////////// __shared__ unsigned int choices[MPD_Y_WINDOW_SIZE*MPD_WORDS_PER_SITE]; // Make the choices. makeYDiffusionChoices(window, sitesWindow, choices, latticeIndex, latticeYIndex, windowIndex, windowYIndex, timestepHash); __syncthreads(); ////////////////////////////////////////////////////////// // Create version of the lattice at the next time step. // ////////////////////////////////////////////////////////// // Progate the choices to the new lattice segment. performPropagation(outLattice, window, choices, latticeIndex, windowIndex-TUNE_MPD_Y_BLOCK_X_SIZE, windowIndex, windowIndex+TUNE_MPD_Y_BLOCK_X_SIZE, MPD_Y_WINDOW_SIZE, siteOverflowList); } /** * Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice * site, storing the new lattice into shared memory, and then updating the global lattice. */ #ifdef MPD_CUDA_3D_GRID_LAUNCH __global__ void __launch_bounds__(TUNE_MPD_Z_BLOCK_X_SIZE*TUNE_MPD_Z_BLOCK_Z_SIZE,1) mpd_z_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList) { unsigned int bx=blockIdx.x, by=blockIdx.y, bz=blockIdx.z; #else __global__ void __launch_bounds__(TUNE_MPD_Z_BLOCK_X_SIZE*TUNE_MPD_Z_BLOCK_Z_SIZE,1) mpd_z_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned int gridXSize, const unsigned long long timestepHash, unsigned int* siteOverflowList) { __shared__ unsigned int bx, by, bz; calculateBlockPosition(&bx, &by, &bz, gridXSize); #endif // Figure out the offset of this thread in the lattice and the lattice segment. unsigned int latticeZIndex = (bz*blockDim.z) + threadIdx.z; unsigned int latticeIndex = (latticeZIndex*latticeXYSizeC) + (by*latticeXSizeC) + (bx*blockDim.x) + threadIdx.x; unsigned int windowZIndex = threadIdx.z+MPD_APRON_SIZE; unsigned int windowIndex = (windowZIndex*blockDim.x) + threadIdx.x; /////////////////////////////////////////// // Load the lattice into shared memory. // /////////////////////////////////////////// // Shared memory to store the lattice segment. Each lattice site has four particles, eight bits for each particle. __shared__ unsigned int window[MPD_Z_WINDOW_SIZE*MPD_WORDS_PER_SITE]; __shared__ uint8_t sitesWindow[MPD_Z_WINDOW_SIZE]; // Copy the x window from device memory into shared memory. 
copyZWindowFromLattice(inLattice, window, latticeIndex, latticeZIndex, windowIndex, windowZIndex); copyZWindowFromSites(inSites, sitesWindow, latticeIndex, latticeZIndex, windowIndex, windowZIndex); __syncthreads(); //////////////////////////////////////// // Make the choice for each particle. // //////////////////////////////////////// __shared__ unsigned int choices[MPD_Z_WINDOW_SIZE*MPD_WORDS_PER_SITE]; // Make the choices. makeZDiffusionChoices(window, sitesWindow, choices, latticeIndex, latticeZIndex, windowIndex, windowZIndex, timestepHash); __syncthreads(); ////////////////////////////////////////////////////////// // Create version of the lattice at the next time step. // ////////////////////////////////////////////////////////// // Progate the choices to the new lattice segment. performPropagation(outLattice, window, choices, latticeIndex, windowIndex-TUNE_MPD_Z_BLOCK_X_SIZE, windowIndex, windowIndex+TUNE_MPD_Z_BLOCK_X_SIZE, MPD_Z_WINDOW_SIZE, siteOverflowList); } /** * Multiparticle diffusion performed by copying the lattice section to shared memory, making a choice for each lattice * site, storing the new lattice into shared memory, and then updating the global lattice. */ #ifdef MPD_CUDA_3D_GRID_LAUNCH #ifdef MPD_GLOBAL_S_MATRIX #ifdef MPD_GLOBAL_R_MATRIX __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList, const __restrict__ int8_t *SG, const __restrict__ uint8_t *RLG, const unsigned int* __restrict__ reactionOrdersG, const unsigned int* __restrict__ reactionSitesG, const unsigned int* __restrict__ D1G, const unsigned int* __restrict__ D2G, const float* __restrict__ reactionRatesG) #else __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList, const __restrict__ int8_t *SG, const __restrict__ uint8_t *RLG) #endif #else __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList) #endif { unsigned int bx=blockIdx.x, by=blockIdx.y, bz=blockIdx.z; #else #ifdef MPD_GLOBAL_S_MATRIX #ifdef MPD_GLOBAL_R_MATRIX __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned int gridXSize, const unsigned long long timestepHash, unsigned int* siteOverflowList, const __restrict__ int8_t *SG, const __restrict__ uint8_t *RLG, const unsigned int* __restrict__ reactionOrdersG, const unsigned int* __restrict__ reactionSitesG, const unsigned int* __restrict__ D1G, const unsigned int* __restrict__ D2G, const float* __restrict__ reactionRatesG) #else __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned int gridXSize, const unsigned long long timestepHash, unsigned int* siteOverflowList, const __restrict__ int8_t *SG, const __restrict__ uint8_t *RLG) #endif #else __global__ 
void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned int gridXSize, const unsigned long long timestepHash, unsigned int* siteOverflowList) #endif { __shared__ unsigned int bx, by, bz; calculateBlockPosition(&bx, &by, &bz, gridXSize); #endif // Figure out the offset of this thread in the lattice and the lattice segment. unsigned int latticeYIndex = (by*blockDim.y) + threadIdx.y; unsigned int latticeIndex = (bz*latticeXYSizeC) + (latticeYIndex*latticeXSizeC) + (bx*blockDim.x) + threadIdx.x; /////////////////////////////////////////// // Load the particles and site. // /////////////////////////////////////////// unsigned int particles[MPD_WORDS_PER_SITE]; for (uint w=0, latticeOffset=0; w<MPD_WORDS_PER_SITE; w++, latticeOffset+=latticeXYZSizeC) particles[w] = inLattice[latticeIndex+latticeOffset]; uint8_t siteType = inSites[latticeIndex]; //////////////////////////////////////// // Perform the reactions. // //////////////////////////////////////// // Calculate the kinetic rate for each reaction at this site. float totalReactionPropensity = 0.0f; for (int i=0; i<numberReactionsC; i++) { #ifdef MPD_GLOBAL_S_MATRIX #ifdef MPD_GLOBAL_R_MATRIX totalReactionPropensity += calculateReactionPropensity(siteType, (uint8_t*)particles, i, RLG, reactionOrdersG, reactionSitesG, D1G, D2G, reactionRatesG); #else totalReactionPropensity += calculateReactionPropensity(siteType, (uint8_t*)particles, i, RLG); #endif #else totalReactionPropensity += calculateReactionPropensity(siteType, (uint8_t*)particles, i); #endif } // If propensity is zero, no reaction can occur. if(totalReactionPropensity == 0.0f) return; // See if a reaction occurred at the site. float reactionProbability = calculateReactionProbability(totalReactionPropensity); unsigned int reactionOccurred = checkForReaction(latticeIndex, reactionProbability, timestepHash); // If there was a reaction, process it. if (reactionOccurred) { // Figure out which reaction occurred. #ifdef MPD_GLOBAL_S_MATRIX #ifdef MPD_GLOBAL_R_MATRIX unsigned int reactionIndex = determineReactionIndex(siteType, (uint8_t*)particles, latticeIndex, totalReactionPropensity, timestepHash, RLG, reactionOrdersG, reactionSitesG, D1G, D2G, reactionRatesG); #else unsigned int reactionIndex = determineReactionIndex(siteType, (uint8_t*)particles, latticeIndex, totalReactionPropensity, timestepHash, RLG); #endif #else unsigned int reactionIndex = determineReactionIndex(siteType, (uint8_t*)particles, latticeIndex, totalReactionPropensity, timestepHash); #endif // Construct the new site. #ifdef MPD_GLOBAL_S_MATRIX evaluateReaction(latticeIndex, siteType, (uint8_t*)particles, reactionIndex, siteOverflowList, SG); #else evaluateReaction(latticeIndex, siteType, (uint8_t*)particles, reactionIndex, siteOverflowList); #endif // Copy the new particles back into the lattice. 
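            // Only sites where a reaction fired reach this write-back; as launched from
            // runTimestep(), inLattice and outLattice both point at the source buffer, so
            // every other site simply keeps its current contents.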
for (uint w=0, latticeOffset=0; w<MPD_WORDS_PER_SITE; w++, latticeOffset+=latticeXYZSizeC) outLattice[latticeIndex+latticeOffset] = particles[w]; } } #ifdef MPD_GLOBAL_S_MATRIX #ifdef MPD_GLOBAL_R_MATRIX __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) precomp_reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList, const __restrict__ int8_t *SG, const __restrict__ uint8_t *RLG, const unsigned int* __restrict__ reactionOrdersG, const unsigned int* __restrict__ reactionSitesG, const unsigned int* __restrict__ D1G, const unsigned int* __restrict__ D2G, const float* __restrict__ reactionRatesG, const float* __restrict__ qp0, const float* __restrict__ qp1, const float* __restrict__ qp2) #else __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) precomp_reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList, const __restrict__ int8_t *SG, const __restrict__ uint8_t *RLG, const float* __restrict__ qp0, const float* __restrict__ qp1, const float* __restrict__ qp2) #endif #else __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) precomp_reaction_kernel(const unsigned int* inLattice, const uint8_t * inSites, unsigned int* outLattice, const unsigned long long timestepHash, unsigned int* siteOverflowList, const float* __restrict__ qp0, const float* __restrict__ qp1, const float* __restrict__ qp2) #endif { unsigned int bx=blockIdx.x, by=blockIdx.y, bz=blockIdx.z; // Figure out the offset of this thread in the lattice and the lattice segment. unsigned int latticeYIndex = (by*blockDim.y) + threadIdx.y; unsigned int latticeIndex = (bz*latticeXYSizeC) + (latticeYIndex*latticeXSizeC) + (bx*blockDim.x) + threadIdx.x; /////////////////////////////////////////// // Load the particles and site. // /////////////////////////////////////////// unsigned int particles[MPD_WORDS_PER_SITE]; for (uint w=0, latticeOffset=0; w<MPD_WORDS_PER_SITE; w++, latticeOffset+=latticeXYZSizeC) particles[w] = inLattice[latticeIndex+latticeOffset]; uint8_t siteType = inSites[latticeIndex]; //////////////////////////////////////// // Perform the reactions. // //////////////////////////////////////// // Calculate the kinetic rate for each reaction at this site. float totalReactionPropensity = read_element(qp0,siteType); //float totalReactionPropensity = qp0[siteType]; for(uint i=0; i<MPD_PARTICLES_PER_SITE; i++) { uint8_t p1=((uint8_t*)particles)[i]; if(p1 > 0) { totalReactionPropensity += read_element(qp1,(siteType * numberSpeciesC + (p1-1))); //totalReactionPropensity += qp1[(siteType * numberSpeciesC + (p1-1))]; for(uint j=i+1; j<MPD_PARTICLES_PER_SITE; j++) { uint8_t p2=((uint8_t*)particles)[j]; if(p2 > 0) { totalReactionPropensity += read_element(qp2,siteType*numberSpeciesC*numberSpeciesC + (p1-1)*numberSpeciesC + (p2-1)); //totalReactionPropensity += qp2[siteType*numberSpeciesC*numberSpeciesC + (p1-1)*numberSpeciesC + (p2-1)]; } } } } // If propensity is zero, no reaction can occur. if(totalReactionPropensity == 0.0f) return; // See if a reaction occurred at the site. 
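    // calculateReactionProbability() and checkForReaction() are defined elsewhere in the MPD
    // device code; presumably the former converts the summed propensity into a firing
    // probability for this timestep (on the order of 1 - exp(-a*tau)) and the latter compares
    // it against a random draw derived from latticeIndex and timestepHash.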
float reactionProbability = calculateReactionProbability(totalReactionPropensity); unsigned int reactionOccurred = checkForReaction(latticeIndex, reactionProbability, timestepHash); // If there was a reaction, process it. if (reactionOccurred) { // Figure out which reaction occurred. #ifdef MPD_GLOBAL_S_MATRIX #ifdef MPD_GLOBAL_R_MATRIX unsigned int reactionIndex = determineReactionIndex(siteType, (uint8_t*)particles, latticeIndex, totalReactionPropensity, timestepHash, RLG, reactionOrdersG, reactionSitesG, D1G, D2G, reactionRatesG); #else unsigned int reactionIndex = determineReactionIndex(siteType, (uint8_t*)particles, latticeIndex, totalReactionPropensity, timestepHash, RLG); #endif #else unsigned int reactionIndex = determineReactionIndex(siteType, (uint8_t*)particles, latticeIndex, totalReactionPropensity, timestepHash); #endif // Construct the new site. #ifdef MPD_GLOBAL_S_MATRIX evaluateReaction(latticeIndex, siteType, (uint8_t*)particles, reactionIndex, siteOverflowList, SG); #else evaluateReaction(latticeIndex, siteType, (uint8_t*)particles, reactionIndex, siteOverflowList); #endif // Copy the new particles back into the lattice. for (uint w=0, latticeOffset=0; w<MPD_WORDS_PER_SITE; w++, latticeOffset+=latticeXYZSizeC) outLattice[latticeIndex+latticeOffset] = particles[w]; } } __global__ void correct_overflows(unsigned int* lattice, unsigned int* siteOverflowList) { // Remember: #define MPD_OVERFLOW_LIST_SIZE 1+2*TUNE_MPD_MAX_PARTICLE_OVERFLOWS*sizeof(uint32_t) // Requirement: one block with TUNE_MPD_MAX_PARTICLE_OVERFLOWS threads const unsigned int i = threadIdx.x; const unsigned int total = siteOverflowList[0]; __shared__ unsigned int indexes[TUNE_MPD_MAX_PARTICLE_OVERFLOWS]; __shared__ unsigned int maxround; // Do I have an overflow to look at? if(i >= total) return; // Abort if overflows have overflown if(threadIdx.x == 0) assert(total < TUNE_MPD_MAX_PARTICLE_OVERFLOWS); // load our index lattice_size_t latticeIndex = siteOverflowList[(i*2)+1]; particle_t particle = siteOverflowList[(i*2)+2]; indexes[i] = latticeIndex; // zero out list maxround=0; __syncthreads(); siteOverflowList[0]=0; // Discover which round I should go in. To prevent situations where two threads // will try and add a particle to the same site at the same time, and thus // negating the others, each thread determines what round it is allowed to make // edits in, by determining how many previous overflows are occuring at the // same index. int round=0; for(int j=0; j < i; j++) { if(indexes[j] == latticeIndex) round++; } atomicMax(&maxround, round); __syncthreads(); for(int r=0; r <= maxround; r++) { if(round == r) { unsigned int particles[MPD_WORDS_PER_SITE]; for (uint w=0, latticeOffset=0; w<MPD_WORDS_PER_SITE; w++, latticeOffset+=latticeXYZSizeC) particles[w] = lattice[latticeIndex+latticeOffset]; uint8_t *p=(uint8_t*)particles; int ok=0; for(int pi=0; pi<MPD_PARTICLES_PER_SITE; pi++) { if(p[pi] == 0) { p[pi]=particle; ok=1; //printf("(round %d) Corrected overflow of particle %d at index %d\n", r, particle, latticeIndex); break; } } if(ok) { // Copy the new particles back into the lattice. 
for (uint w=0, latticeOffset=0; w<MPD_WORDS_PER_SITE; w++, latticeOffset+=latticeXYZSizeC) lattice[latticeIndex+latticeOffset] = particles[w]; } else { int exceptionIndex = atomicAdd(siteOverflowList, 1); siteOverflowList[(exceptionIndex*2)+1]=latticeIndex; siteOverflowList[(exceptionIndex*2)+2]=particle; //printf("(round %d) Failed to correct overflow of particle %d at index %d\n", r, particle, latticeIndex); } } __syncthreads(); } //if(i == 0) //printf("in: %d overflows, out: %d\n", total, siteOverflowList[0]); } // Sanity test kernel to compare kernel outputs __global__ void __launch_bounds__(TUNE_MPD_REACTION_BLOCK_X_SIZE*TUNE_MPD_REACTION_BLOCK_Y_SIZE,1) sanity_check(const unsigned int* L1, const unsigned int* L2) { // Figure out the offset of this thread in the lattice and the lattice segment. unsigned int bx=blockIdx.x, by=blockIdx.y, bz=blockIdx.z; unsigned int latticeYIndex = (by*blockDim.y) + threadIdx.y; unsigned int latticeIndex = (bz*latticeXYSizeC) + (latticeYIndex*latticeXSizeC) + (bx*blockDim.x) + threadIdx.x; /////////////////////////////////////////// // Load the particles and site. // /////////////////////////////////////////// unsigned int particles1; unsigned int particles2; for (uint w=0, latticeOffset=0; w<MPD_WORDS_PER_SITE; w++, latticeOffset+=latticeXYZSizeC) { particles1 = L1[latticeIndex+latticeOffset]; particles2 = L2[latticeIndex+latticeOffset]; if(particles1 != particles2) { printf("**\n ********* Sanity failure block(%d,%d,%d) thread(%d,%d,%d) w=%d L1=%8X L2=%8X\n**\n", bx,by,bz, threadIdx.x, threadIdx.y, threadIdx.z, w, particles1, particles2); } } } } } }
40d17cc1e5441d12596dc50b7dc34172397541ea.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <assert.h> #include <layer_kernels.cuh> #include "dropc/dropc_dev.hpp" #include "dropc/dropc_bit_dev.hpp" /* * E = -log(y_t) * probs: (numOut, numCases) * labels: (1, numCases) * maxProbs: (1, numCases) * labelLogProbs: (1, numCases) (*out) * correctProbs: (1, numCases) (*out) * * target: (1, numCases) */ __global__ void kLogregCost(float* probs, float* labels, float* maxProbs, float* labelLogProbs, float* correctProbs, const int numCases, const int numOut) { const int tx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x; if (tx < numCases) { const int label = int(labels[tx]); const float maxp = maxProbs[tx]; const float labelp = probs[label * numCases + tx]; labelLogProbs[tx] = __logf(labelp); /* * Compute the probability of guessing the correct case if you take the most-probable label. * * This is done like this: * * - If the most probable label is not equal to the true label, then the probability is zero. * - Otherwise, the probability is 1 / (number of labels whose probability is equal to the maximum). * * This is certainly overkill -- in practice, it's just about impossible for two labels to get assigned * maximum probability. But it's a safety measure to prevent over-estimating your accuracy. * Though it could never happen in reality. Well it could. But it wouldn't. Cool? 
*/ if (labelp != maxp) { correctProbs[tx] = 0; } else { int numMax = 0; for (int i = 0; i < numOut; i++) { numMax += probs[i * numCases + tx] == maxp; } correctProbs[tx] = 1.0f / float(numMax); } } } /* * E = -log(y_t) * y_l: (numOut, numCases) * labels: (1, numCases) * * dE_dy_l: (numOut, numCases) */ template <bool add> __global__ void kLogregCostGrad(float* y_l, float* labels, float* dE_dy_l, const int numCases, const int numOut, const float gradCoeff) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { const int label = int(labels[tx]); float v = gradCoeff * (label == ty); v = __fdividef(v, y_l[tidx]); if (add) { dE_dy_l[tidx] += v; } else { dE_dy_l[tidx] = v; } } } /* * dE_dy_l: (numOut, numCases) * y_l: (numOut, numCases) * * dE_dx_l: (numOut, numCases) */ template <bool add> __global__ void kSoftmaxGrad(float* dE_dy_l, float* y_l, float* dE_dx_l, const int numCases, const int numOut) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { float v = 0; for (int j = 0; j < numOut; j++) { v += dE_dy_l[j * numCases + tx] * ((j == ty) - y_l[j * numCases + tx]); } v *= y_l[tidx]; if (add) { dE_dx_l[tidx] += v; } else { dE_dx_l[tidx] = v; } } } /* * E = -log(y_t) * y_l: (numOut, numCases) * labels: (1, numCases) * * dE_dx_l: (numOut, numCases) */ template <bool add> __global__ void kLogregSoftmaxGrad(float* y_l, float* labels, float* dE_dx_l, const int numCases, const int numOut, const float gradCoeff) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { const int label = int(labels[tx]); float v = gradCoeff * ((label == ty) - y_l[tidx]); if (add) { dE_dx_l[tidx] += v; } else { dE_dx_l[tidx] = v; } } } template <int B_X, bool add> __global__ void kEltwiseMaxGrad(float* actGrad, float* input, float* output, float* target, const int numElements) { for (int i = B_X * blockIdx.x + threadIdx.x; i < numElements; i += B_X * gridDim.x) { if (add) { target[i] += actGrad[i] * (output[i] == input[i]); } else { target[i] = actGrad[i] * (output[i] == input[i]); } } } void computeEltwiseMaxGrad(NVMatrix& actGrad, NVMatrix& input, NVMatrix& output, NVMatrix& target, bool add) { assert(actGrad.isContiguous()); assert(output.isContiguous()); assert(input.isContiguous()); assert(actGrad.isSameDims(input)); assert(actGrad.isSameDims(output)); dim3 blocks(DIVUP(actGrad.getNumElements(), 128)); dim3 threads(128); if (add) { assert(actGrad.isSameDims(target)); hipFuncSetCacheConfig(kEltwiseMaxGrad<128, true>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kEltwiseMaxGrad<128, true>), dim3(blocks), dim3(threads), 0, 0, actGrad.getDevData(), input.getDevData(), output.getDevData(), target.getDevData(), actGrad.getNumElements()); } else { target.resize(actGrad); hipFuncSetCacheConfig(kEltwiseMaxGrad<128, false>, hipFuncCachePreferL1); hipLaunchKernelGGL(( kEltwiseMaxGrad<128, false>), dim3(blocks), dim3(threads), 0, 0, actGrad.getDevData(), input.getDevData(), output.getDevData(), target.getDevData(), actGrad.getNumElements()); } cutilCheckMsg("computeEltwiseMaxGrad: Kernel execution failed"); } /* * E = -log(y_t) * probs: (numOut, numCases) * labels: (1, numCases) * 
maxProbs: (1, numCases) * labelLogProbs: (1, numCases) (*out) * correctProbs: (1, numCases) (*out) * * target: (1, numCases) */ void computeLogregCost(NVMatrix& labels, NVMatrix& probs, NVMatrix& labelLogProbs_out, NVMatrix& correctProbs_out) { int numCases = probs.getNumCols(); int numOut = probs.getNumRows(); assert(labels.getNumElements() == numCases); assert(!labels.isTrans()); assert(!probs.isTrans()); assert(labels.isContiguous()); assert(probs.isContiguous()); NVMatrix& maxProbs = probs.max(0); labelLogProbs_out.resize(1, numCases); correctProbs_out.resize(1, numCases); dim3 threads(LOGREG_ERR_THREADS_X, 1); dim3 blocks(DIVUP(numCases, LOGREG_ERR_THREADS_X), 1); hipFuncSetCacheConfig(kLogregCost, hipFuncCachePreferL1); hipLaunchKernelGGL(( kLogregCost), dim3(blocks), dim3(threads), 0, 0, probs.getDevData(), labels.getDevData(), maxProbs.getDevData(), labelLogProbs_out.getDevData(), correctProbs_out.getDevData(), numCases, numOut); cutilCheckMsg("computeLogregCost: Kernel execution failed"); // hipDeviceSynchronize(); delete &maxProbs; } void computeLogregGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) { int numCases = probs.getLeadingDim(); int numOut = probs.getFollowingDim(); assert(labels.getNumElements() == numCases); assert(probs.isContiguous()); assert(target.isContiguous()); assert(labels.isContiguous()); assert(!labels.isTrans()); assert(!probs.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); if (!add) { target.resize(probs); hipLaunchKernelGGL(( kLogregCostGrad<false>), dim3(blocks), dim3(threads), 0, 0, probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } else { hipLaunchKernelGGL(( kLogregCostGrad<true>), dim3(blocks), dim3(threads), 0, 0, probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } cutilCheckMsg("computeLogregGrad: Kernel execution failed"); } void computeSoftmaxGrad(NVMatrix& acts, NVMatrix& actsGrad, NVMatrix& target, bool add) { int numCases = acts.getLeadingDim(); int numOut = acts.getFollowingDim(); assert(acts.isSameDims(actsGrad)); assert(acts.isContiguous()); assert(actsGrad.isContiguous()); assert(target.isContiguous()); assert(acts.isTrans()); assert(actsGrad.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); if (!add) { target.resize(acts); hipLaunchKernelGGL(( kSoftmaxGrad<false>), dim3(blocks), dim3(threads), 0, 0, actsGrad.getDevData(), acts.getDevData(), target.getDevData(), numCases, numOut); } else { hipLaunchKernelGGL(( kSoftmaxGrad<true>), dim3(blocks), dim3(threads), 0, 0, actsGrad.getDevData(), acts.getDevData(), target.getDevData(), numCases, numOut); } cutilCheckMsg("computeSoftmaxGrad: Kernel execution failed"); } void computeLogregSoftmaxGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) { int numCases = probs.getLeadingDim(); int numOut = probs.getFollowingDim(); assert(labels.getNumElements() == numCases); assert(probs.isContiguous()); assert(target.isContiguous()); assert(labels.isContiguous()); assert(probs.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); if (!add) { target.resize(probs); hipLaunchKernelGGL(( kLogregSoftmaxGrad<false>), dim3(blocks), dim3(threads), 
0, 0, probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } else { hipLaunchKernelGGL(( kLogregSoftmaxGrad<true>), dim3(blocks), dim3(threads), 0, 0, probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } cutilCheckMsg("computeLogregSoftmaxGrad: Kernel execution failed"); } //------------------------------------------------------- // functions related with dropc //------------------------------------------------------- void computeFCDropC_fprop( NVMatrix& x, ///<[in] input matrix x, col major, numData x inDim NVMatrix& w, ///<[in] weight matrix w, col major, inDim x outDim NVMatrix& b, ///<[in] bias matrix, row major, 1 x outDim NVMatrix& mw, ///<[in] maskWeights, col major, inDim x (outDimxnumData) NVMatrix& mb, ///<[in] maskBiases, col major, dataDim x outDim NVMatrix& y ///<[in,out] target matrix y, col major, dataDim x outDim ){ // pre-condition check assert( x.isTrans() ); int numData = x.getNumRows(); int inDim = x.getNumCols(); assert( w.isTrans() ); assert( w.getNumRows() == inDim ); int outDim = w.getNumCols(); assert( !b.isTrans() ); assert( b.getNumRows() == 1 && b.getNumCols() == outDim ); assert( mw.isTrans() ); assert( mw.getNumRows() == inDim && mw.getNumCols() == (outDim*numData) ); assert( mb.isTrans() ); assert( mb.getNumRows() == numData&& mb.getNumCols() == outDim ); assert( y.isTrans() ); assert( y.getNumRows() == numData && y.getNumCols() == outDim ); // call dev function computeFCDropC_fprop_d( x.getDevData(), w.getDevData(), b.getDevData(), // input matrix //m, n, d, // dims outDim, inDim, numData, mw.getDevData(), mb.getDevData(), // masks y.getDevData() // output ); } void computeFCDropC_bpropActs( NVMatrix& v, ///<[in] bprop act from previous layer, col major,numData x outDim NVMatrix& w, ///<[in] weight matrix w, col major, inDim x outDim float scale_g, ///<[in] input gradient scale NVMatrix& mw, ///<[in] maskWeights, col major, inDim x (outDimxnumData) NVMatrix& da, ///<[in,out] d-active, col major, numData x inDim float scale_da ///<[in] da scale ){ // pre-condition check assert( v.isTrans() ); int numData = v.getNumRows(); int outDim = v.getNumCols(); assert( w.isTrans() ); int inDim = w.getNumRows(); assert( w.getNumCols() == outDim ); assert( mw.isTrans() ); assert( mw.getNumRows() == inDim && mw.getNumCols() == (outDim*numData) ); assert( da.isTrans() ); assert( da.getNumRows() == numData && da.getNumCols() == inDim ); // call dev function computeFCDropC_bpropActs_d( v.getDevData(), w.getDevData(), //m, n, d, outDim, inDim, numData, scale_g, mw.getDevData(), da.getDevData(), scale_da ); } void computeFCDropC_bpropWeights( NVMatrix& a, ///<[in] prev activation matrix, col major, numData x inDim NVMatrix& v, ///<[in] gradient matrix, col major, numData x outDim float scale_g, ///<[in] inc scale NVMatrix& mw, ///<[in] maskWeights, col major, inDim x (outDimxnumData) NVMatrix& dw, ///<[in,out] w gradient, col major, inDim x outDim float scale_dw ///<[in] gradient scale ){ // pre-condition check assert( a.isTrans() ); int numData = a.getNumRows(); int inDim = a.getNumCols(); assert( v.isTrans() ); assert( v.getNumRows() == numData ); int outDim = v.getNumCols(); assert( mw.isTrans() ); assert( mw.getNumRows() == inDim && mw.getNumCols() == (outDim*numData) ); assert( dw.isTrans() ); assert( dw.getNumRows() == inDim && dw.getNumCols() == outDim ); // call dev function computeFCDropC_bpropWeights_d( a.getDevData(), v.getDevData(), //m, n, d, outDim, inDim, numData, scale_g, mw.getDevData(), 
dw.getDevData(), scale_dw ); } void computeFCDropC_bit_fprop( NVMatrix& x, ///<[in] input matrix x, col major, numData x inDim NVMatrix& w, ///<[in] weight matrix w, col major, inDim x outDim NVMatrix& b, ///<[in] bias matrix, row major, 1 x outDim const MaskWeights& mw, ///<[in] maskWeights object NVMatrix& mb, ///<[in] maskBiases, col major, dataDim x outDim NVMatrix& y ///<[in,out] target matrix y, col major, dataDim x outDim ){ // pre-condition check assert( x.isTrans() ); int numData = x.getNumRows(); int inDim = x.getNumCols(); assert( w.isTrans() ); assert( w.getNumRows() == inDim ); int outDim = w.getNumCols(); assert( !b.isTrans() ); assert( b.getNumRows() == 1 && b.getNumCols() == outDim ); assert( mb.isTrans() ); assert( mb.getNumRows() == numData&& mb.getNumCols() == outDim ); assert( y.isTrans() ); assert( y.getNumRows() == numData && y.getNumCols() == outDim ); // call dev function computeFCDropC_bit_fprop_d( x.getDevData(), w.getDevData(), b.getDevData(), // input matrix //m, n, d, // dims outDim, inDim, numData, mw, //mask w mb.getDevData(), // mask b y.getDevData() // output ); } void computeFCDropC_bit_bpropActs( NVMatrix& v, ///<[in] bprop act from previous layer, col major,numData x outDim NVMatrix& w, ///<[in] weight matrix w, col major, inDim x outDim float scale_g, ///<[in] input gradient scale const MaskWeights& mw, ///<[in] maskWeights object NVMatrix& da, ///<[in,out] d-active, col major, numData x inDim float scale_da ///<[in] da scale ){ // pre-condition check assert( v.isTrans() ); int numData = v.getNumRows(); int outDim = v.getNumCols(); assert( w.isTrans() ); int inDim = w.getNumRows(); assert( w.getNumCols() == outDim ); assert( da.isTrans() ); assert( da.getNumRows() == numData && da.getNumCols() == inDim ); // call dev function computeFCDropC_bit_bpropActs_d( v.getDevData(), w.getDevData(), //m, n, d, outDim, inDim, numData, scale_g, mw, da.getDevData(), scale_da ); } void computeFCDropC_bit_bpropWeights( NVMatrix& a, ///<[in] prev activation matrix, col major, numData x inDim NVMatrix& v, ///<[in] gradient matrix, col major, numData x outDim float scale_g, ///<[in] inc scale const MaskWeights& mw, ///<[in] maskWeights object NVMatrix& dw, ///<[in,out] w gradient, col major, inDim x outDim float scale_dw ///<[in] gradient scale ){ // pre-condition check assert( a.isTrans() ); int numData = a.getNumRows(); int inDim = a.getNumCols(); assert( v.isTrans() ); assert( v.getNumRows() == numData ); int outDim = v.getNumCols(); assert( dw.isTrans() ); assert( dw.getNumRows() == inDim && dw.getNumCols() == outDim ); // call dev function computeFCDropC_bit_bpropWeights_d( a.getDevData(), v.getDevData(), //m, n, d, outDim, inDim, numData, scale_g, mw, dw.getDevData(), scale_dw ); } void computeFCDropC_bit_inference( NVMatrix& mu, ///<[in] mean matrix, col major, dataDim x outDim NVMatrix& var, ///<[in] var matrix, col major, dataDim x outDim int numSamples, ///<[in] number of samples for mc sampling NVMatrix& y ///<[in,out] target matrix y, col major, dataDim x outDim ){ int numData = mu.getNumRows(); int outDim = mu.getNumCols(); size_t num_elements = numData * outDim; assert( mu.isTrans() ); assert( var.getNumRows() == numData ); assert( var.getNumCols() == outDim ); assert( var.isTrans() ); assert( y.getNumRows() == numData ); assert( y.getNumCols() == outDim ); assert( y.isTrans() ); // call dev funtion computeFCDropC_bit_inference_d( mu.getDevData(), var.getDevData(), num_elements, numSamples, y.getDevData()); }
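
//-------------------------------------------------------
// Note on the mask layouts used above (as implied by the assertions): maskWeights (mw) is
// column-major with shape inDim x (outDim*numData), i.e. every data case carries its own full
// inDim x outDim weight mask, and maskBiases (mb) is numData x outDim -- presumably the
// per-example weight/bias dropping of a DropConnect-style fully connected layer. The
// *_bit_* variants pass the same information through a packed MaskWeights object instead of
// a dense NVMatrix.
//-------------------------------------------------------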
40d17cc1e5441d12596dc50b7dc34172397541ea.cu
/* * Copyright (c) 2011, Alex Krizhevsky ([email protected]) * All rights reserved. * * Redistribution and use in source and binary forms, with or without modification, * are permitted provided that the following conditions are met: * * - Redistributions of source code must retain the above copyright notice, * this list of conditions and the following disclaimer. * * - Redistributions in binary form must reproduce the above copyright notice, * this list of conditions and the following disclaimer in the documentation * and/or other materials provided with the distribution. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING * NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ #include <assert.h> #include <layer_kernels.cuh> #include "dropc/dropc_dev.hpp" #include "dropc/dropc_bit_dev.hpp" /* * E = -log(y_t) * probs: (numOut, numCases) * labels: (1, numCases) * maxProbs: (1, numCases) * labelLogProbs: (1, numCases) (*out) * correctProbs: (1, numCases) (*out) * * target: (1, numCases) */ __global__ void kLogregCost(float* probs, float* labels, float* maxProbs, float* labelLogProbs, float* correctProbs, const int numCases, const int numOut) { const int tx = blockIdx.x * LOGREG_ERR_THREADS_X + threadIdx.x; if (tx < numCases) { const int label = int(labels[tx]); const float maxp = maxProbs[tx]; const float labelp = probs[label * numCases + tx]; labelLogProbs[tx] = __logf(labelp); /* * Compute the probability of guessing the correct case if you take the most-probable label. * * This is done like this: * * - If the most probable label is not equal to the true label, then the probability is zero. * - Otherwise, the probability is 1 / (number of labels whose probability is equal to the maximum). * * This is certainly overkill -- in practice, it's just about impossible for two labels to get assigned * maximum probability. But it's a safety measure to prevent over-estimating your accuracy. * Though it could never happen in reality. Well it could. But it wouldn't. Cool? 
*/ if (labelp != maxp) { correctProbs[tx] = 0; } else { int numMax = 0; for (int i = 0; i < numOut; i++) { numMax += probs[i * numCases + tx] == maxp; } correctProbs[tx] = 1.0f / float(numMax); } } } /* * E = -log(y_t) * y_l: (numOut, numCases) * labels: (1, numCases) * * dE_dy_l: (numOut, numCases) */ template <bool add> __global__ void kLogregCostGrad(float* y_l, float* labels, float* dE_dy_l, const int numCases, const int numOut, const float gradCoeff) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { const int label = int(labels[tx]); float v = gradCoeff * (label == ty); v = __fdividef(v, y_l[tidx]); if (add) { dE_dy_l[tidx] += v; } else { dE_dy_l[tidx] = v; } } } /* * dE_dy_l: (numOut, numCases) * y_l: (numOut, numCases) * * dE_dx_l: (numOut, numCases) */ template <bool add> __global__ void kSoftmaxGrad(float* dE_dy_l, float* y_l, float* dE_dx_l, const int numCases, const int numOut) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { float v = 0; for (int j = 0; j < numOut; j++) { v += dE_dy_l[j * numCases + tx] * ((j == ty) - y_l[j * numCases + tx]); } v *= y_l[tidx]; if (add) { dE_dx_l[tidx] += v; } else { dE_dx_l[tidx] = v; } } } /* * E = -log(y_t) * y_l: (numOut, numCases) * labels: (1, numCases) * * dE_dx_l: (numOut, numCases) */ template <bool add> __global__ void kLogregSoftmaxGrad(float* y_l, float* labels, float* dE_dx_l, const int numCases, const int numOut, const float gradCoeff) { const int tx = blockIdx.x * LOGREG_GRAD_THREADS_X + threadIdx.x; const int ty = blockIdx.y * LOGREG_GRAD_THREADS_Y + threadIdx.y; const int tidx = ty * numCases + tx; if (ty < numOut && tx < numCases) { const int label = int(labels[tx]); float v = gradCoeff * ((label == ty) - y_l[tidx]); if (add) { dE_dx_l[tidx] += v; } else { dE_dx_l[tidx] = v; } } } template <int B_X, bool add> __global__ void kEltwiseMaxGrad(float* actGrad, float* input, float* output, float* target, const int numElements) { for (int i = B_X * blockIdx.x + threadIdx.x; i < numElements; i += B_X * gridDim.x) { if (add) { target[i] += actGrad[i] * (output[i] == input[i]); } else { target[i] = actGrad[i] * (output[i] == input[i]); } } } void computeEltwiseMaxGrad(NVMatrix& actGrad, NVMatrix& input, NVMatrix& output, NVMatrix& target, bool add) { assert(actGrad.isContiguous()); assert(output.isContiguous()); assert(input.isContiguous()); assert(actGrad.isSameDims(input)); assert(actGrad.isSameDims(output)); dim3 blocks(DIVUP(actGrad.getNumElements(), 128)); dim3 threads(128); if (add) { assert(actGrad.isSameDims(target)); cudaFuncSetCacheConfig(kEltwiseMaxGrad<128, true>, cudaFuncCachePreferL1); kEltwiseMaxGrad<128, true><<<blocks, threads>>>(actGrad.getDevData(), input.getDevData(), output.getDevData(), target.getDevData(), actGrad.getNumElements()); } else { target.resize(actGrad); cudaFuncSetCacheConfig(kEltwiseMaxGrad<128, false>, cudaFuncCachePreferL1); kEltwiseMaxGrad<128, false><<<blocks, threads>>>(actGrad.getDevData(), input.getDevData(), output.getDevData(), target.getDevData(), actGrad.getNumElements()); } cutilCheckMsg("computeEltwiseMaxGrad: Kernel execution failed"); } /* * E = -log(y_t) * probs: (numOut, numCases) * labels: (1, numCases) * maxProbs: (1, numCases) * labelLogProbs: (1, numCases) (*out) * 
correctProbs: (1, numCases) (*out) * * target: (1, numCases) */ void computeLogregCost(NVMatrix& labels, NVMatrix& probs, NVMatrix& labelLogProbs_out, NVMatrix& correctProbs_out) { int numCases = probs.getNumCols(); int numOut = probs.getNumRows(); assert(labels.getNumElements() == numCases); assert(!labels.isTrans()); assert(!probs.isTrans()); assert(labels.isContiguous()); assert(probs.isContiguous()); NVMatrix& maxProbs = probs.max(0); labelLogProbs_out.resize(1, numCases); correctProbs_out.resize(1, numCases); dim3 threads(LOGREG_ERR_THREADS_X, 1); dim3 blocks(DIVUP(numCases, LOGREG_ERR_THREADS_X), 1); cudaFuncSetCacheConfig(kLogregCost, cudaFuncCachePreferL1); kLogregCost<<<blocks, threads>>>(probs.getDevData(), labels.getDevData(), maxProbs.getDevData(), labelLogProbs_out.getDevData(), correctProbs_out.getDevData(), numCases, numOut); cutilCheckMsg("computeLogregCost: Kernel execution failed"); // cudaThreadSynchronize(); delete &maxProbs; } void computeLogregGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) { int numCases = probs.getLeadingDim(); int numOut = probs.getFollowingDim(); assert(labels.getNumElements() == numCases); assert(probs.isContiguous()); assert(target.isContiguous()); assert(labels.isContiguous()); assert(!labels.isTrans()); assert(!probs.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); if (!add) { target.resize(probs); kLogregCostGrad<false><<<blocks, threads>>>(probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } else { kLogregCostGrad<true><<<blocks, threads>>>(probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } cutilCheckMsg("computeLogregGrad: Kernel execution failed"); } void computeSoftmaxGrad(NVMatrix& acts, NVMatrix& actsGrad, NVMatrix& target, bool add) { int numCases = acts.getLeadingDim(); int numOut = acts.getFollowingDim(); assert(acts.isSameDims(actsGrad)); assert(acts.isContiguous()); assert(actsGrad.isContiguous()); assert(target.isContiguous()); assert(acts.isTrans()); assert(actsGrad.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); if (!add) { target.resize(acts); kSoftmaxGrad<false><<<blocks, threads>>>(actsGrad.getDevData(), acts.getDevData(), target.getDevData(), numCases, numOut); } else { kSoftmaxGrad<true><<<blocks, threads>>>(actsGrad.getDevData(), acts.getDevData(), target.getDevData(), numCases, numOut); } cutilCheckMsg("computeSoftmaxGrad: Kernel execution failed"); } void computeLogregSoftmaxGrad(NVMatrix& labels, NVMatrix& probs, NVMatrix& target, bool add, float coeff) { int numCases = probs.getLeadingDim(); int numOut = probs.getFollowingDim(); assert(labels.getNumElements() == numCases); assert(probs.isContiguous()); assert(target.isContiguous()); assert(labels.isContiguous()); assert(probs.isTrans()); dim3 threads(LOGREG_GRAD_THREADS_X, LOGREG_GRAD_THREADS_Y); dim3 blocks(DIVUP(numCases, LOGREG_GRAD_THREADS_X), DIVUP(numOut, LOGREG_GRAD_THREADS_Y)); if (!add) { target.resize(probs); kLogregSoftmaxGrad<false><<<blocks, threads>>>(probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } else { kLogregSoftmaxGrad<true><<<blocks, threads>>>(probs.getDevData(), labels.getDevData(), target.getDevData(), numCases, numOut, coeff); } cutilCheckMsg("computeLogregSoftmaxGrad: 
Kernel execution failed"); } //------------------------------------------------------- // functions related with dropc //------------------------------------------------------- void computeFCDropC_fprop( NVMatrix& x, ///<[in] input matrix x, col major, numData x inDim NVMatrix& w, ///<[in] weight matrix w, col major, inDim x outDim NVMatrix& b, ///<[in] bias matrix, row major, 1 x outDim NVMatrix& mw, ///<[in] maskWeights, col major, inDim x (outDimxnumData) NVMatrix& mb, ///<[in] maskBiases, col major, dataDim x outDim NVMatrix& y ///<[in,out] target matrix y, col major, dataDim x outDim ){ // pre-condition check assert( x.isTrans() ); int numData = x.getNumRows(); int inDim = x.getNumCols(); assert( w.isTrans() ); assert( w.getNumRows() == inDim ); int outDim = w.getNumCols(); assert( !b.isTrans() ); assert( b.getNumRows() == 1 && b.getNumCols() == outDim ); assert( mw.isTrans() ); assert( mw.getNumRows() == inDim && mw.getNumCols() == (outDim*numData) ); assert( mb.isTrans() ); assert( mb.getNumRows() == numData&& mb.getNumCols() == outDim ); assert( y.isTrans() ); assert( y.getNumRows() == numData && y.getNumCols() == outDim ); // call dev function computeFCDropC_fprop_d( x.getDevData(), w.getDevData(), b.getDevData(), // input matrix //m, n, d, // dims outDim, inDim, numData, mw.getDevData(), mb.getDevData(), // masks y.getDevData() // output ); } void computeFCDropC_bpropActs( NVMatrix& v, ///<[in] bprop act from previous layer, col major,numData x outDim NVMatrix& w, ///<[in] weight matrix w, col major, inDim x outDim float scale_g, ///<[in] input gradient scale NVMatrix& mw, ///<[in] maskWeights, col major, inDim x (outDimxnumData) NVMatrix& da, ///<[in,out] d-active, col major, numData x inDim float scale_da ///<[in] da scale ){ // pre-condition check assert( v.isTrans() ); int numData = v.getNumRows(); int outDim = v.getNumCols(); assert( w.isTrans() ); int inDim = w.getNumRows(); assert( w.getNumCols() == outDim ); assert( mw.isTrans() ); assert( mw.getNumRows() == inDim && mw.getNumCols() == (outDim*numData) ); assert( da.isTrans() ); assert( da.getNumRows() == numData && da.getNumCols() == inDim ); // call dev function computeFCDropC_bpropActs_d( v.getDevData(), w.getDevData(), //m, n, d, outDim, inDim, numData, scale_g, mw.getDevData(), da.getDevData(), scale_da ); } void computeFCDropC_bpropWeights( NVMatrix& a, ///<[in] prev activation matrix, col major, numData x inDim NVMatrix& v, ///<[in] gradient matrix, col major, numData x outDim float scale_g, ///<[in] inc scale NVMatrix& mw, ///<[in] maskWeights, col major, inDim x (outDimxnumData) NVMatrix& dw, ///<[in,out] w gradient, col major, inDim x outDim float scale_dw ///<[in] gradient scale ){ // pre-condition check assert( a.isTrans() ); int numData = a.getNumRows(); int inDim = a.getNumCols(); assert( v.isTrans() ); assert( v.getNumRows() == numData ); int outDim = v.getNumCols(); assert( mw.isTrans() ); assert( mw.getNumRows() == inDim && mw.getNumCols() == (outDim*numData) ); assert( dw.isTrans() ); assert( dw.getNumRows() == inDim && dw.getNumCols() == outDim ); // call dev function computeFCDropC_bpropWeights_d( a.getDevData(), v.getDevData(), //m, n, d, outDim, inDim, numData, scale_g, mw.getDevData(), dw.getDevData(), scale_dw ); } void computeFCDropC_bit_fprop( NVMatrix& x, ///<[in] input matrix x, col major, numData x inDim NVMatrix& w, ///<[in] weight matrix w, col major, inDim x outDim NVMatrix& b, ///<[in] bias matrix, row major, 1 x outDim const MaskWeights& mw, ///<[in] maskWeights object NVMatrix& mb, 
///<[in] maskBiases, col major, dataDim x outDim NVMatrix& y ///<[in,out] target matrix y, col major, dataDim x outDim ){ // pre-condition check assert( x.isTrans() ); int numData = x.getNumRows(); int inDim = x.getNumCols(); assert( w.isTrans() ); assert( w.getNumRows() == inDim ); int outDim = w.getNumCols(); assert( !b.isTrans() ); assert( b.getNumRows() == 1 && b.getNumCols() == outDim ); assert( mb.isTrans() ); assert( mb.getNumRows() == numData&& mb.getNumCols() == outDim ); assert( y.isTrans() ); assert( y.getNumRows() == numData && y.getNumCols() == outDim ); // call dev function computeFCDropC_bit_fprop_d( x.getDevData(), w.getDevData(), b.getDevData(), // input matrix //m, n, d, // dims outDim, inDim, numData, mw, //mask w mb.getDevData(), // mask b y.getDevData() // output ); } void computeFCDropC_bit_bpropActs( NVMatrix& v, ///<[in] bprop act from previous layer, col major,numData x outDim NVMatrix& w, ///<[in] weight matrix w, col major, inDim x outDim float scale_g, ///<[in] input gradient scale const MaskWeights& mw, ///<[in] maskWeights object NVMatrix& da, ///<[in,out] d-active, col major, numData x inDim float scale_da ///<[in] da scale ){ // pre-condition check assert( v.isTrans() ); int numData = v.getNumRows(); int outDim = v.getNumCols(); assert( w.isTrans() ); int inDim = w.getNumRows(); assert( w.getNumCols() == outDim ); assert( da.isTrans() ); assert( da.getNumRows() == numData && da.getNumCols() == inDim ); // call dev function computeFCDropC_bit_bpropActs_d( v.getDevData(), w.getDevData(), //m, n, d, outDim, inDim, numData, scale_g, mw, da.getDevData(), scale_da ); } void computeFCDropC_bit_bpropWeights( NVMatrix& a, ///<[in] prev activation matrix, col major, numData x inDim NVMatrix& v, ///<[in] gradient matrix, col major, numData x outDim float scale_g, ///<[in] inc scale const MaskWeights& mw, ///<[in] maskWeights object NVMatrix& dw, ///<[in,out] w gradient, col major, inDim x outDim float scale_dw ///<[in] gradient scale ){ // pre-condition check assert( a.isTrans() ); int numData = a.getNumRows(); int inDim = a.getNumCols(); assert( v.isTrans() ); assert( v.getNumRows() == numData ); int outDim = v.getNumCols(); assert( dw.isTrans() ); assert( dw.getNumRows() == inDim && dw.getNumCols() == outDim ); // call dev function computeFCDropC_bit_bpropWeights_d( a.getDevData(), v.getDevData(), //m, n, d, outDim, inDim, numData, scale_g, mw, dw.getDevData(), scale_dw ); } void computeFCDropC_bit_inference( NVMatrix& mu, ///<[in] mean matrix, col major, dataDim x outDim NVMatrix& var, ///<[in] var matrix, col major, dataDim x outDim int numSamples, ///<[in] number of samples for mc sampling NVMatrix& y ///<[in,out] target matrix y, col major, dataDim x outDim ){ int numData = mu.getNumRows(); int outDim = mu.getNumCols(); size_t num_elements = numData * outDim; assert( mu.isTrans() ); assert( var.getNumRows() == numData ); assert( var.getNumCols() == outDim ); assert( var.isTrans() ); assert( y.getNumRows() == numData ); assert( y.getNumCols() == outDim ); assert( y.isTrans() ); // call dev funtion computeFCDropC_bit_inference_d( mu.getDevData(), var.getDevData(), num_elements, numSamples, y.getDevData()); }
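The pair above illustrates the main mechanical change hipify makes to kernel launches: CUDA's triple-chevron syntax becomes an explicit hipLaunchKernelGGL call with grid, block, dynamic-shared-memory and stream arguments. A minimal sketch of that mapping, using a hypothetical scale kernel that is not part of the file above:

#include <hip/hip_runtime.h>

__global__ void scale(float* data, float factor, int n) {
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= factor;            // simple element-wise update
}

void launch_scale(float* d_data, float factor, int n) {
    dim3 threads(256);
    dim3 blocks((n + threads.x - 1) / threads.x);
    // CUDA source:  scale<<<blocks, threads>>>(d_data, factor, n);
    // hipify output (0 bytes of dynamic shared memory, default stream):
    hipLaunchKernelGGL(scale, blocks, threads, 0, 0, d_data, factor, n);
}

The fourth and fifth arguments (0, 0) correspond to the shared-memory size and stream that the original <<<blocks, threads>>> launch left at their defaults.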
7164e3b6bbeded677cd2d9cc1a254d88bfad304b.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void NormalizeW(float* g_VecW, float * g_NormW, float* g_VecV, int N) { // shared memory size declared at kernel launch extern __shared__ float sNormData[]; unsigned int tid = threadIdx.x; unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x; if(tid==0) sNormData[0] = g_NormW[0]; __syncthreads(); // For thread ids greater than data space if (globalid < N) { g_VecV[globalid] = g_VecW[globalid]/sNormData[0]; } }
7164e3b6bbeded677cd2d9cc1a254d88bfad304b.cu
__global__ void NormalizeW(float* g_VecW, float * g_NormW, float* g_VecV, int N) { // shared memory size declared at kernel launch extern __shared__ float sNormData[]; unsigned int tid = threadIdx.x; unsigned int globalid = blockIdx.x*blockDim.x + threadIdx.x; if(tid==0) sNormData[0] = g_NormW[0]; __syncthreads(); // For thread ids greater than data space if (globalid < N) { g_VecV[globalid] = g_VecW[globalid]/sNormData[0]; } }
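NormalizeW above declares extern __shared__ float sNormData[], so the dynamic shared-memory size has to be supplied as the third launch parameter. A hypothetical host wrapper, assuming the kernel definition above is in scope and that sizeof(float) bytes are enough because only sNormData[0] is ever read:

// Hypothetical host-side launch for NormalizeW (not part of the file above).
void normalize(float* d_W, float* d_normW, float* d_V, int N) {
    int threads = 256;
    int blocks  = (N + threads - 1) / threads;
    // third launch parameter = dynamic shared-memory size in bytes
    NormalizeW<<<blocks, threads, sizeof(float)>>>(d_W, d_normW, d_V, N);
}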
a6c5648192527488bf0da61d9459c6dc48649bf6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "THH.h" #include "luaT.h" #include <lua.h> #include <lauxlib.h> #include <lualib.h> #include "THHTensorRandom.h" #include "THHDeviceUtils.cuh" #include "THHReduceApplyUtils.cuh" #include "THHApply.cuh" #include "THHGeneral.h" #include "THHTensorCopy.h" #include "THHTensorMath.h" #include <thrust/device_ptr.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/for_each.h> #include <thrust/device_vector.h> #include <thrust/iterator/zip_iterator.h> struct encode_functor { const int n; encode_functor(int _n) : n(_n) {} template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<3>(t) = thrust::get<0>(t) + (thrust::get<1>(t)-1) * n; } }; __global__ void encode(int N, int n, float *input0, float *input1, float *output) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) { output[i] = input0[i] + (input1[i]-1)*n; } } struct decode1_functor { const float a; decode1_functor(float _a) : a(_a) {} __host__ __device__ float operator()(const float& x, const float& y) const { return a * x + y; } }; struct decode2_functor { const float a; decode2_functor(float _a) : a(_a) {} __host__ __device__ float operator()(const float& x, const float& y) const { return a * x + y; } }; __global__ void decode(int N, int n, float *input, float *output0, float *output1) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) { int x = floorf(input[i] + 0.5f)-1; output0[i] = (x % n) + 1; output1[i] = (x / n) + 1; } } static int jhu_THCEncode(lua_State *L) { int narg = lua_gettop(L); if (narg != 4) { THError("expecting exactly 4 arguments"); } THCState *state = getCutorchState(L); THCudaTensor *input0 = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *input1 = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); long N = lua_tonumber(L, 4); if(!THCudaTensor_isContiguous(state, input0) || !THCudaTensor_isContiguous(state, input1) || !THCudaTensor_isContiguous(state, output)) { THError("tensor arguments must be contiguous"); } float *input_data0 = THCudaTensor_data(state, input0); float *input_data1 = THCudaTensor_data(state, input1); float *output_data = THCudaTensor_data(state, output); int nelem1 = THCudaTensor_size(state, input0, 0); int nelem2 = THCudaTensor_size(state, input1, 0); int ndim1 = THCudaTensor_nDimension(state, input0); int ndim2 = THCudaTensor_nDimension(state, input1); if (ndim1 != ndim2) THError("dim mismatch"); if (nelem1 != nelem2) THError("size mismatch"); hipLaunchKernelGGL(( encode), dim3((nelem1+255)/256), dim3(256), 0, 0, nelem1, N, input_data0, input_data1, output_data); /////////////////////////// THRUST VERSION DOESN'T COMPILE /////////////////////////// // thrust::device_ptr<float> input0_start = thrust::device_pointer_cast(input_data0); // thrust::device_ptr<float> input0_stop = input0_start + nelem1; // thrust::device_ptr<float> input1_start = thrust::device_pointer_cast(input_data1); // thrust::device_ptr<float> input1_stop = input1_start + nelem1; // thrust::device_ptr<float> output_start = thrust::device_pointer_cast(input_data1); // thrust::device_ptr<float> output_stop = output_start + nelem1; // thrust::device_vector<float> ivec1(input0_start, input0_stop); // thrust::device_vector<float> ivec2(input1_start, input1_stop); // 
thrust::device_vector<float> ovec1(output_start, output_stop); // typedef thrust::device_vector<float>::iterator FloatIterator; // typedef thrust::tuple<FloatIterator, FloatIterator, FloatIterator> IteratorTuple; // typedef thrust::zip_iterator<IteratorTuple> ZipIterator; // // finally, create the zip_iterators // ZipIterator start_iter(thrust::make_tuple(ivec1.begin(), ivec2.begin(), ovec1.begin())); // ZipIterator end_iter(thrust::make_tuple(ivec1.end(), ivec2.end(), ovec1.end())); // // apply the transformation // thrust::for_each(start_iter, end_iter, encode_functor(N)); return 0; } static int jhu_THCDecode(lua_State *L) { int narg = lua_gettop(L); if (narg != 4) { THError("expecting exactly 4 arguments"); } THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *output0 = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output1 = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); long N = lua_tonumber(L, 4); float *input_data = THCudaTensor_data(state, input); float *output_data0 = THCudaTensor_data(state, output0); float *output_data1 = THCudaTensor_data(state, output1); int nelem0 = THCudaTensor_size(state, input, 0); int nelem1 = THCudaTensor_size(state, output0, 0); int nelem2 = THCudaTensor_size(state, output1, 0); int ndim1 = THCudaTensor_nDimension(state, output0); int ndim2 = THCudaTensor_nDimension(state, output1); if (ndim1 != ndim2) THError("dim mismatch"); if (nelem0 != nelem1) THError("size mismatch"); if (nelem1 != nelem2) THError("size mismatch"); hipLaunchKernelGGL(( decode), dim3((nelem0+255)/256), dim3(256), 0, 0, nelem0, N, input_data, output_data0, output_data1); return 0; } static const struct luaL_Reg jhu_THEncode__ [] = { {"encode", jhu_THCEncode}, {"decode", jhu_THCDecode}, {0, 0} }; static void jhu_THCEncode_init(lua_State *L) { int ret = luaT_pushmetatable(L, "torch.CudaTensor"); if(ret == 0) { THError("problem pushing metatable"); } luaT_registeratname(L, jhu_THEncode__, "jhu"); lua_pop(L, 1); }
a6c5648192527488bf0da61d9459c6dc48649bf6.cu
#include "THC.h" #include "luaT.h" #include <lua.h> #include <lauxlib.h> #include <lualib.h> #include "THCTensorRandom.h" #include "THCDeviceUtils.cuh" #include "THCReduceApplyUtils.cuh" #include "THCApply.cuh" #include "THCGeneral.h" #include "THCTensorCopy.h" #include "THCTensorMath.h" #include <thrust/device_ptr.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/generate.h> #include <thrust/reduce.h> #include <thrust/functional.h> #include <thrust/for_each.h> #include <thrust/device_vector.h> #include <thrust/iterator/zip_iterator.h> struct encode_functor { const int n; encode_functor(int _n) : n(_n) {} template <typename Tuple> __host__ __device__ void operator()(Tuple t) { thrust::get<3>(t) = thrust::get<0>(t) + (thrust::get<1>(t)-1) * n; } }; __global__ void encode(int N, int n, float *input0, float *input1, float *output) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) { output[i] = input0[i] + (input1[i]-1)*n; } } struct decode1_functor { const float a; decode1_functor(float _a) : a(_a) {} __host__ __device__ float operator()(const float& x, const float& y) const { return a * x + y; } }; struct decode2_functor { const float a; decode2_functor(float _a) : a(_a) {} __host__ __device__ float operator()(const float& x, const float& y) const { return a * x + y; } }; __global__ void decode(int N, int n, float *input, float *output0, float *output1) { int i = blockIdx.x*blockDim.x + threadIdx.x; if (i < N) { int x = floorf(input[i] + 0.5f)-1; output0[i] = (x % n) + 1; output1[i] = (x / n) + 1; } } static int jhu_THCEncode(lua_State *L) { int narg = lua_gettop(L); if (narg != 4) { THError("expecting exactly 4 arguments"); } THCState *state = getCutorchState(L); THCudaTensor *input0 = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *input1 = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); long N = lua_tonumber(L, 4); if(!THCudaTensor_isContiguous(state, input0) || !THCudaTensor_isContiguous(state, input1) || !THCudaTensor_isContiguous(state, output)) { THError("tensor arguments must be contiguous"); } float *input_data0 = THCudaTensor_data(state, input0); float *input_data1 = THCudaTensor_data(state, input1); float *output_data = THCudaTensor_data(state, output); int nelem1 = THCudaTensor_size(state, input0, 0); int nelem2 = THCudaTensor_size(state, input1, 0); int ndim1 = THCudaTensor_nDimension(state, input0); int ndim2 = THCudaTensor_nDimension(state, input1); if (ndim1 != ndim2) THError("dim mismatch"); if (nelem1 != nelem2) THError("size mismatch"); encode<<<(nelem1+255)/256, 256>>>(nelem1, N, input_data0, input_data1, output_data); /////////////////////////// THRUST VERSION DOESN'T COMPILE /////////////////////////// // thrust::device_ptr<float> input0_start = thrust::device_pointer_cast(input_data0); // thrust::device_ptr<float> input0_stop = input0_start + nelem1; // thrust::device_ptr<float> input1_start = thrust::device_pointer_cast(input_data1); // thrust::device_ptr<float> input1_stop = input1_start + nelem1; // thrust::device_ptr<float> output_start = thrust::device_pointer_cast(input_data1); // thrust::device_ptr<float> output_stop = output_start + nelem1; // thrust::device_vector<float> ivec1(input0_start, input0_stop); // thrust::device_vector<float> ivec2(input1_start, input1_stop); // thrust::device_vector<float> ovec1(output_start, output_stop); // typedef thrust::device_vector<float>::iterator FloatIterator; // 
typedef thrust::tuple<FloatIterator, FloatIterator, FloatIterator> IteratorTuple; // typedef thrust::zip_iterator<IteratorTuple> ZipIterator; // // finally, create the zip_iterators // ZipIterator start_iter(thrust::make_tuple(ivec1.begin(), ivec2.begin(), ovec1.begin())); // ZipIterator end_iter(thrust::make_tuple(ivec1.end(), ivec2.end(), ovec1.end())); // // apply the transformation // thrust::for_each(start_iter, end_iter, encode_functor(N)); return 0; } static int jhu_THCDecode(lua_State *L) { int narg = lua_gettop(L); if (narg != 4) { THError("expecting exactly 4 arguments"); } THCState *state = getCutorchState(L); THCudaTensor *input = (THCudaTensor *)luaT_checkudata(L, 1, "torch.CudaTensor"); THCudaTensor *output0 = (THCudaTensor *)luaT_checkudata(L, 2, "torch.CudaTensor"); THCudaTensor *output1 = (THCudaTensor *)luaT_checkudata(L, 3, "torch.CudaTensor"); long N = lua_tonumber(L, 4); float *input_data = THCudaTensor_data(state, input); float *output_data0 = THCudaTensor_data(state, output0); float *output_data1 = THCudaTensor_data(state, output1); int nelem0 = THCudaTensor_size(state, input, 0); int nelem1 = THCudaTensor_size(state, output0, 0); int nelem2 = THCudaTensor_size(state, output1, 0); int ndim1 = THCudaTensor_nDimension(state, output0); int ndim2 = THCudaTensor_nDimension(state, output1); if (ndim1 != ndim2) THError("dim mismatch"); if (nelem0 != nelem1) THError("size mismatch"); if (nelem1 != nelem2) THError("size mismatch"); decode<<<(nelem0+255)/256, 256>>>(nelem0, N, input_data, output_data0, output_data1); return 0; } static const struct luaL_Reg jhu_THEncode__ [] = { {"encode", jhu_THCEncode}, {"decode", jhu_THCDecode}, {0, 0} }; static void jhu_THCEncode_init(lua_State *L) { int ret = luaT_pushmetatable(L, "torch.CudaTensor"); if(ret == 0) { THError("problem pushing metatable"); } luaT_registeratname(L, jhu_THEncode__, "jhu"); lua_pop(L, 1); }
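The encode/decode kernels above pack a pair of 1-based indices (i, j) into a single value i + (j - 1) * n and recover them with modulo and integer division. A host-side reference that mirrors the same arithmetic on one pair (illustrative only, not part of the Torch binding):

#include <assert.h>

// Host reference for the encode/decode kernels above (1-based Lua indices).
static float encode_pair(float i, float j, int n) { return i + (j - 1.0f) * (float)n; }

static void decode_pair(float code, int n, float* i, float* j) {
    int x = (int)(code + 0.5f) - 1;   // same floorf(v + 0.5f) - 1 rounding as the kernel
    *i = (float)(x % n) + 1.0f;
    *j = (float)(x / n) + 1.0f;
}

int main(void) {
    float i, j;
    decode_pair(encode_pair(3.0f, 2.0f, 5), 5, &i, &j);   // 3 + (2 - 1) * 5 = 8
    assert(i == 3.0f && j == 2.0f);
    return 0;
}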
e8b7f1e2511bf5e7eb92bd6d2c8663fefd5eecb0.hip
// !!! This is a file automatically generated by hipify!!! #include "ParticleSpriteRenderer.cuh" #include <algorithm> #include <glm/gtc/type_ptr.hpp> #include <cuda_gl_interop.h> #include "CameraRotateCenter.hpp" #define BLUR true const int FBO_MARGIN = 50; void ParticleSpriteRenderer::init() { glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4); glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 5); glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); // OpenGL initialization glewExperimental = GL_TRUE; GLenum error = glewInit(); if (error != GLEW_OK) { std::cout << "PROBLEMS! CANNOT LOAD OpenGL!" << std::endl; throw std::runtime_error("Can't load GL"); } createFlareTexture(); createVaosVbos(); initShaders(); initFbos(); setUniforms(); } std::vector<float> genFlareTex(int tex_size) { std::vector<float> pixels(static_cast<unsigned long>(tex_size * tex_size)); float sigma2 = static_cast<float>(tex_size) / 2.0f; float A = 1.0; for (int i = 0; i < tex_size; ++i) { float i1 = i - tex_size / 2; for (int j = 0; j < tex_size; ++j) { float j1 = j - tex_size / 2; // gamma corrected gauss pixels[i * tex_size + j] = pow(A * exp(-((i1 * i1) / (2 * sigma2) + (j1 * j1) / (2 * sigma2))), 2.2f); } } return pixels; } void ParticleSpriteRenderer::createFlareTexture() { texSize = 16; glCreateTextures(GL_TEXTURE_2D, 1, &flareTex); glTextureStorage2D(flareTex, 1, GL_R32F, texSize, texSize); glTextureParameteri(flareTex, GL_TEXTURE_MIN_FILTER, GL_LINEAR); { std::vector<float> pixels = genFlareTex(texSize); glTextureSubImage2D(flareTex, 0, 0, 0, texSize, texSize, GL_RED, GL_FLOAT, pixels.data()); } } void ParticleSpriteRenderer::createVaosVbos() { // Particle VAO glCreateVertexArrays(1, &vaoParticles); glCreateBuffers(1, &vboParticlesPos); // glCreateBuffers(1, &ssboVelocities); glVertexArrayVertexBuffer(vaoParticles, 0, vboParticlesPos, 0, sizeof(glm::vec4)); // glVertexArrayVertexBuffer(vaoParticles, 1, ssboVelocities, 0, sizeof(glm::vec4)); // Position glEnableVertexArrayAttrib(vaoParticles, 0); glVertexArrayAttribFormat(vaoParticles, 0, 4, GL_FLOAT, GL_FALSE, 0); glVertexArrayAttribBinding(vaoParticles, 0, 0); // Velocity glEnableVertexArrayAttrib(vaoParticles, 1); glVertexArrayAttribFormat(vaoParticles, 1, 4, GL_FLOAT, GL_FALSE, 0); glVertexArrayAttribBinding(vaoParticles, 1, 1); // Deferred VAO glCreateVertexArrays(1, &vaoDeferred); glCreateBuffers(1, &vboDeferred); glVertexArrayVertexBuffer(vaoDeferred, 0, vboDeferred, 0, sizeof(glm::vec2)); // Position glEnableVertexArrayAttrib(vaoDeferred, 0); glVertexArrayAttribFormat(vaoDeferred, 0, 2, GL_FLOAT, GL_FALSE, 0); glVertexArrayAttribBinding(vaoDeferred, 0, 0); // Deferred tri glm::vec2 tri[3] = { glm::vec2(-2, -1), glm::vec2(+2, -1), glm::vec2(0, 4)}; glNamedBufferStorage(vboDeferred, 3 * sizeof(glm::vec2), tri, 0); } void ParticleSpriteRenderer::initShaders() { programHdr.source(GL_VERTEX_SHADER, "shaders/main.vert"); programHdr.source(GL_FRAGMENT_SHADER, "shaders/main.frag"); programHdr.source(GL_GEOMETRY_SHADER, "shaders/main.geom"); programHdr.link(); programTonemap.source(GL_VERTEX_SHADER, "shaders/deferred.vert"); programTonemap.source(GL_FRAGMENT_SHADER, "shaders/tonemap.frag"); programTonemap.link(); if (BLUR) { programBlur.source(GL_VERTEX_SHADER, "shaders/deferred.vert"); programBlur.source(GL_FRAGMENT_SHADER, "shaders/blur.frag"); programBlur.link(); } programLum.source(GL_VERTEX_SHADER, "shaders/deferred.vert"); programLum.source(GL_FRAGMENT_SHADER, "shaders/luminance.frag"); programLum.link(); } void ParticleSpriteRenderer::initFbos() { int 
blur_dsc = 2; blurDownscale = blur_dsc; glCreateFramebuffers(4, fbos); glCreateTextures(GL_TEXTURE_2D, 4, attachs); int base_width = camera->getWindowWidth() + 2 * FBO_MARGIN; int base_height = camera->getWindowHeight() + 2 * FBO_MARGIN; int widths[] = {base_width, base_width / blur_dsc, base_width / blur_dsc, base_width / 2}; int heights[] = {base_height, base_height / blur_dsc, base_height / blur_dsc, base_height / 2}; lumLod = (int) floor(log2((double) max(base_width, base_height) / 2)); int mipmaps[] = {1, 1, 1, lumLod + 1}; GLenum types[] = {GL_RGBA16F, GL_RGBA16F, GL_RGBA16F, GL_R16F}; GLenum min_filters[] = {GL_LINEAR, GL_LINEAR, GL_LINEAR, GL_LINEAR_MIPMAP_LINEAR}; for (int i = 0; i < 4; ++i) { glTextureStorage2D(attachs[i], mipmaps[i], types[i], widths[i], heights[i]); glTextureParameteri(attachs[i], GL_TEXTURE_MIN_FILTER, min_filters[i]); glTextureParameteri(attachs[i], GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTextureParameteri(attachs[i], GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glNamedFramebufferTexture(fbos[i], GL_COLOR_ATTACHMENT0, attachs[i], 0); } } void ParticleSpriteRenderer::setUniforms() { // const Uniforms // glProgramUniform1f(programInteraction.getId(), 0, SIM_dt); // glProgramUniform1f(programInteraction.getId(), 1, G); // glProgramUniform1f(programInteraction.getId(), 2, DAMPING); // glProgramUniform1f(programIntegration.getId(), 0, SIM_dt); // NDC sprite size glProgramUniform2f(programHdr.getId(), 8, texSize / float(2 * camera->getWindowWidth()), texSize / float(2 * camera->getWindowHeight())); // Blur sample offset length glProgramUniform2f(programBlur.getId(), 0, (float) blurDownscale / camera->getWindowWidth(), (float) blurDownscale / camera->getWindowHeight()); } void ParticleSpriteRenderer::render(float frameTime) { camera->applyInput(); // Particle HDR rendering glViewport(0, 0, camera->getWindowWidth() + 2 * FBO_MARGIN, camera->getWindowHeight() + 2 * FBO_MARGIN); glBindVertexArray(vaoParticles); glEnable(GL_BLEND); glBlendFunc(GL_ONE, GL_ONE); glBindFramebuffer(GL_FRAMEBUFFER, fbos[0]); glUseProgram(programHdr.getId()); glClear(GL_COLOR_BUFFER_BIT); glProgramUniformMatrix4fv(programHdr.getId(), 0, 1, GL_FALSE, glm::value_ptr(camera->getView())); glProgramUniformMatrix4fv(programHdr.getId(), 4, 1, GL_FALSE, glm::value_ptr(camera->getProj())); glBindTextureUnit(0, flareTex); glMemoryBarrier(GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT); glDrawArrays(GL_POINTS, 0, static_cast<GLsizei>(numParticles)); glBindVertexArray(vaoDeferred); glDisable(GL_BLEND); glViewport(0, 0, (camera->getWindowWidth() + 2 * FBO_MARGIN) / blurDownscale, (camera->getWindowHeight() + 2 * FBO_MARGIN) / blurDownscale); glUseProgram(programBlur.getId()); // Blur pingpong (N horizontal blurs then N vertical blurs) int loop = 0; for (int i = 0; i < 2; ++i) { if (i == 0) glProgramUniform2f(programBlur.getId(), 1, 1, 0); else glProgramUniform2f(programBlur.getId(), 1, 0, 1); for (int j = 0; j < 100; ++j) { GLuint fbo = fbos[(loop % 2) + 1]; GLuint attach = attachs[loop ? 
((loop + 1) % 2 + 1) : 0]; glBindFramebuffer(GL_FRAMEBUFFER, fbo); glBindTextureUnit(0, attach); glDrawArrays(GL_TRIANGLES, 0, 3); loop++; } } // Average luminance glViewport(0, 0, (camera->getWindowWidth() + 2 * FBO_MARGIN) / 2, (camera->getWindowHeight() + 2 * FBO_MARGIN) / 2); glBindFramebuffer(GL_FRAMEBUFFER, fbos[3]); glUseProgram(programLum.getId()); glBindTextureUnit(0, attachs[0]); glDrawArrays(GL_TRIANGLES, 0, 3); glGenerateTextureMipmap(attachs[3]); // Tonemapping step (direct to screen) glViewport(0, 0, camera->getWindowWidth(), camera->getWindowHeight()); glBindFramebuffer(GL_FRAMEBUFFER, 0); glUseProgram(programTonemap.getId()); glProgramUniform1i(programTonemap.getId(), 0, lumLod); glBindTextureUnit(0, attachs[0]); glBindTextureUnit(1, attachs[2]); glBindTextureUnit(2, attachs[3]); glDrawArrays(GL_TRIANGLES, 0, 3); } ParticleSpriteRenderer::ParticleSpriteRenderer(int windowWidth, int windowHeight) { camera = new CameraRotateCenter(windowWidth, windowHeight); inputHandler = new SpriteRendererInputHandler(camera); } Camera_I *ParticleSpriteRenderer::getCamera() { return camera; } InputHandler_I *ParticleSpriteRenderer::getInputHandler() { return inputHandler; } void ParticleSpriteRenderer::destroy() { } glm::vec4 *ParticleSpriteRenderer::allocateParticlesAndInit_cpu(Particles* particles) { // SSBO allocation & data upload glNamedBufferStorage(vboParticlesPos, particles->numParticles * sizeof(glm::vec4), particles->pos, GL_MAP_WRITE_BIT | GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT); // Buffer storage is fixed size compared to BuferData //Mapping gpu memory to cpu memory for easy writes. glm::vec4 *particlePosPointer; this->numParticles = static_cast<size_t>(particles->numParticles); particlePosPointer = (glm::vec4 *) glMapNamedBufferRange(vboParticlesPos, 0, particles->numParticles * sizeof(glm::vec4), GL_MAP_WRITE_BIT | GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT); if (!particlePosPointer) { GLenum error = glGetError(); fprintf(stderr, "Buffer map failed! %d (%s)\n", error, glewGetErrorString(error)); //gluErrorString(error)); return nullptr; } else { return particlePosPointer; } } cudaGraphicsResource_t ParticleSpriteRenderer::allocateParticlesAndInit_gpu(Particles* particles) { // SSBO allocation & data upload glNamedBufferStorage(vboParticlesPos, particles->numParticles * sizeof(glm::vec4), particles->pos, GL_MAP_WRITE_BIT | GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT); // Buffer storage is fixed size compared to BuferData this->numParticles = static_cast<size_t>(particles->numParticles); cudaGraphicsResource_t vboParticlesPos_cuda; hipGraphicsGLRegisterBuffer(&vboParticlesPos_cuda, vboParticlesPos, hipGraphicsRegisterFlagsNone); return vboParticlesPos_cuda; }
e8b7f1e2511bf5e7eb92bd6d2c8663fefd5eecb0.cu
#include "ParticleSpriteRenderer.cuh" #include <algorithm> #include <glm/gtc/type_ptr.hpp> #include <cuda_gl_interop.h> #include "CameraRotateCenter.hpp" #define BLUR true const int FBO_MARGIN = 50; void ParticleSpriteRenderer::init() { glfwWindowHint(GLFW_CONTEXT_VERSION_MAJOR, 4); glfwWindowHint(GLFW_CONTEXT_VERSION_MINOR, 5); glfwWindowHint(GLFW_OPENGL_PROFILE, GLFW_OPENGL_CORE_PROFILE); // OpenGL initialization glewExperimental = GL_TRUE; GLenum error = glewInit(); if (error != GLEW_OK) { std::cout << "PROBLEMS! CANNOT LOAD OpenGL!" << std::endl; throw std::runtime_error("Can't load GL"); } createFlareTexture(); createVaosVbos(); initShaders(); initFbos(); setUniforms(); } std::vector<float> genFlareTex(int tex_size) { std::vector<float> pixels(static_cast<unsigned long>(tex_size * tex_size)); float sigma2 = static_cast<float>(tex_size) / 2.0f; float A = 1.0; for (int i = 0; i < tex_size; ++i) { float i1 = i - tex_size / 2; for (int j = 0; j < tex_size; ++j) { float j1 = j - tex_size / 2; // gamma corrected gauss pixels[i * tex_size + j] = pow(A * exp(-((i1 * i1) / (2 * sigma2) + (j1 * j1) / (2 * sigma2))), 2.2f); } } return pixels; } void ParticleSpriteRenderer::createFlareTexture() { texSize = 16; glCreateTextures(GL_TEXTURE_2D, 1, &flareTex); glTextureStorage2D(flareTex, 1, GL_R32F, texSize, texSize); glTextureParameteri(flareTex, GL_TEXTURE_MIN_FILTER, GL_LINEAR); { std::vector<float> pixels = genFlareTex(texSize); glTextureSubImage2D(flareTex, 0, 0, 0, texSize, texSize, GL_RED, GL_FLOAT, pixels.data()); } } void ParticleSpriteRenderer::createVaosVbos() { // Particle VAO glCreateVertexArrays(1, &vaoParticles); glCreateBuffers(1, &vboParticlesPos); // glCreateBuffers(1, &ssboVelocities); glVertexArrayVertexBuffer(vaoParticles, 0, vboParticlesPos, 0, sizeof(glm::vec4)); // glVertexArrayVertexBuffer(vaoParticles, 1, ssboVelocities, 0, sizeof(glm::vec4)); // Position glEnableVertexArrayAttrib(vaoParticles, 0); glVertexArrayAttribFormat(vaoParticles, 0, 4, GL_FLOAT, GL_FALSE, 0); glVertexArrayAttribBinding(vaoParticles, 0, 0); // Velocity glEnableVertexArrayAttrib(vaoParticles, 1); glVertexArrayAttribFormat(vaoParticles, 1, 4, GL_FLOAT, GL_FALSE, 0); glVertexArrayAttribBinding(vaoParticles, 1, 1); // Deferred VAO glCreateVertexArrays(1, &vaoDeferred); glCreateBuffers(1, &vboDeferred); glVertexArrayVertexBuffer(vaoDeferred, 0, vboDeferred, 0, sizeof(glm::vec2)); // Position glEnableVertexArrayAttrib(vaoDeferred, 0); glVertexArrayAttribFormat(vaoDeferred, 0, 2, GL_FLOAT, GL_FALSE, 0); glVertexArrayAttribBinding(vaoDeferred, 0, 0); // Deferred tri glm::vec2 tri[3] = { glm::vec2(-2, -1), glm::vec2(+2, -1), glm::vec2(0, 4)}; glNamedBufferStorage(vboDeferred, 3 * sizeof(glm::vec2), tri, 0); } void ParticleSpriteRenderer::initShaders() { programHdr.source(GL_VERTEX_SHADER, "shaders/main.vert"); programHdr.source(GL_FRAGMENT_SHADER, "shaders/main.frag"); programHdr.source(GL_GEOMETRY_SHADER, "shaders/main.geom"); programHdr.link(); programTonemap.source(GL_VERTEX_SHADER, "shaders/deferred.vert"); programTonemap.source(GL_FRAGMENT_SHADER, "shaders/tonemap.frag"); programTonemap.link(); if (BLUR) { programBlur.source(GL_VERTEX_SHADER, "shaders/deferred.vert"); programBlur.source(GL_FRAGMENT_SHADER, "shaders/blur.frag"); programBlur.link(); } programLum.source(GL_VERTEX_SHADER, "shaders/deferred.vert"); programLum.source(GL_FRAGMENT_SHADER, "shaders/luminance.frag"); programLum.link(); } void ParticleSpriteRenderer::initFbos() { int blur_dsc = 2; blurDownscale = blur_dsc; glCreateFramebuffers(4, 
fbos); glCreateTextures(GL_TEXTURE_2D, 4, attachs); int base_width = camera->getWindowWidth() + 2 * FBO_MARGIN; int base_height = camera->getWindowHeight() + 2 * FBO_MARGIN; int widths[] = {base_width, base_width / blur_dsc, base_width / blur_dsc, base_width / 2}; int heights[] = {base_height, base_height / blur_dsc, base_height / blur_dsc, base_height / 2}; lumLod = (int) floor(log2((double) max(base_width, base_height) / 2)); int mipmaps[] = {1, 1, 1, lumLod + 1}; GLenum types[] = {GL_RGBA16F, GL_RGBA16F, GL_RGBA16F, GL_R16F}; GLenum min_filters[] = {GL_LINEAR, GL_LINEAR, GL_LINEAR, GL_LINEAR_MIPMAP_LINEAR}; for (int i = 0; i < 4; ++i) { glTextureStorage2D(attachs[i], mipmaps[i], types[i], widths[i], heights[i]); glTextureParameteri(attachs[i], GL_TEXTURE_MIN_FILTER, min_filters[i]); glTextureParameteri(attachs[i], GL_TEXTURE_WRAP_S, GL_CLAMP_TO_EDGE); glTextureParameteri(attachs[i], GL_TEXTURE_WRAP_T, GL_CLAMP_TO_EDGE); glNamedFramebufferTexture(fbos[i], GL_COLOR_ATTACHMENT0, attachs[i], 0); } } void ParticleSpriteRenderer::setUniforms() { // const Uniforms // glProgramUniform1f(programInteraction.getId(), 0, SIM_dt); // glProgramUniform1f(programInteraction.getId(), 1, G); // glProgramUniform1f(programInteraction.getId(), 2, DAMPING); // glProgramUniform1f(programIntegration.getId(), 0, SIM_dt); // NDC sprite size glProgramUniform2f(programHdr.getId(), 8, texSize / float(2 * camera->getWindowWidth()), texSize / float(2 * camera->getWindowHeight())); // Blur sample offset length glProgramUniform2f(programBlur.getId(), 0, (float) blurDownscale / camera->getWindowWidth(), (float) blurDownscale / camera->getWindowHeight()); } void ParticleSpriteRenderer::render(float frameTime) { camera->applyInput(); // Particle HDR rendering glViewport(0, 0, camera->getWindowWidth() + 2 * FBO_MARGIN, camera->getWindowHeight() + 2 * FBO_MARGIN); glBindVertexArray(vaoParticles); glEnable(GL_BLEND); glBlendFunc(GL_ONE, GL_ONE); glBindFramebuffer(GL_FRAMEBUFFER, fbos[0]); glUseProgram(programHdr.getId()); glClear(GL_COLOR_BUFFER_BIT); glProgramUniformMatrix4fv(programHdr.getId(), 0, 1, GL_FALSE, glm::value_ptr(camera->getView())); glProgramUniformMatrix4fv(programHdr.getId(), 4, 1, GL_FALSE, glm::value_ptr(camera->getProj())); glBindTextureUnit(0, flareTex); glMemoryBarrier(GL_VERTEX_ATTRIB_ARRAY_BARRIER_BIT); glDrawArrays(GL_POINTS, 0, static_cast<GLsizei>(numParticles)); glBindVertexArray(vaoDeferred); glDisable(GL_BLEND); glViewport(0, 0, (camera->getWindowWidth() + 2 * FBO_MARGIN) / blurDownscale, (camera->getWindowHeight() + 2 * FBO_MARGIN) / blurDownscale); glUseProgram(programBlur.getId()); // Blur pingpong (N horizontal blurs then N vertical blurs) int loop = 0; for (int i = 0; i < 2; ++i) { if (i == 0) glProgramUniform2f(programBlur.getId(), 1, 1, 0); else glProgramUniform2f(programBlur.getId(), 1, 0, 1); for (int j = 0; j < 100; ++j) { GLuint fbo = fbos[(loop % 2) + 1]; GLuint attach = attachs[loop ? 
((loop + 1) % 2 + 1) : 0]; glBindFramebuffer(GL_FRAMEBUFFER, fbo); glBindTextureUnit(0, attach); glDrawArrays(GL_TRIANGLES, 0, 3); loop++; } } // Average luminance glViewport(0, 0, (camera->getWindowWidth() + 2 * FBO_MARGIN) / 2, (camera->getWindowHeight() + 2 * FBO_MARGIN) / 2); glBindFramebuffer(GL_FRAMEBUFFER, fbos[3]); glUseProgram(programLum.getId()); glBindTextureUnit(0, attachs[0]); glDrawArrays(GL_TRIANGLES, 0, 3); glGenerateTextureMipmap(attachs[3]); // Tonemapping step (direct to screen) glViewport(0, 0, camera->getWindowWidth(), camera->getWindowHeight()); glBindFramebuffer(GL_FRAMEBUFFER, 0); glUseProgram(programTonemap.getId()); glProgramUniform1i(programTonemap.getId(), 0, lumLod); glBindTextureUnit(0, attachs[0]); glBindTextureUnit(1, attachs[2]); glBindTextureUnit(2, attachs[3]); glDrawArrays(GL_TRIANGLES, 0, 3); } ParticleSpriteRenderer::ParticleSpriteRenderer(int windowWidth, int windowHeight) { camera = new CameraRotateCenter(windowWidth, windowHeight); inputHandler = new SpriteRendererInputHandler(camera); } Camera_I *ParticleSpriteRenderer::getCamera() { return camera; } InputHandler_I *ParticleSpriteRenderer::getInputHandler() { return inputHandler; } void ParticleSpriteRenderer::destroy() { } glm::vec4 *ParticleSpriteRenderer::allocateParticlesAndInit_cpu(Particles* particles) { // SSBO allocation & data upload glNamedBufferStorage(vboParticlesPos, particles->numParticles * sizeof(glm::vec4), particles->pos, GL_MAP_WRITE_BIT | GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT); // Buffer storage is fixed size compared to BuferData //Mapping gpu memory to cpu memory for easy writes. glm::vec4 *particlePosPointer; this->numParticles = static_cast<size_t>(particles->numParticles); particlePosPointer = (glm::vec4 *) glMapNamedBufferRange(vboParticlesPos, 0, particles->numParticles * sizeof(glm::vec4), GL_MAP_WRITE_BIT | GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT); if (!particlePosPointer) { GLenum error = glGetError(); fprintf(stderr, "Buffer map failed! %d (%s)\n", error, glewGetErrorString(error)); //gluErrorString(error)); return nullptr; } else { return particlePosPointer; } } cudaGraphicsResource_t ParticleSpriteRenderer::allocateParticlesAndInit_gpu(Particles* particles) { // SSBO allocation & data upload glNamedBufferStorage(vboParticlesPos, particles->numParticles * sizeof(glm::vec4), particles->pos, GL_MAP_WRITE_BIT | GL_MAP_READ_BIT | GL_MAP_PERSISTENT_BIT | GL_MAP_COHERENT_BIT); // Buffer storage is fixed size compared to BuferData this->numParticles = static_cast<size_t>(particles->numParticles); cudaGraphicsResource_t vboParticlesPos_cuda; cudaGraphicsGLRegisterBuffer(&vboParticlesPos_cuda, vboParticlesPos, cudaGraphicsRegisterFlagsNone); return vboParticlesPos_cuda; }
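allocateParticlesAndInit_gpu above registers the OpenGL position VBO with cudaGraphicsGLRegisterBuffer and hands the resource back to the caller; the map/unmap step is not shown in this file. A sketch of the usual consumer-side sequence, with update_positions and the kernel launch left hypothetical:

#include <cuda_runtime.h>
#include <cuda_gl_interop.h>

// Hypothetical consumer of the cudaGraphicsResource_t returned above.
void update_positions(cudaGraphicsResource_t vbo_res, size_t numParticles) {
    float4* d_pos = nullptr;
    size_t  bytes = 0;

    cudaGraphicsMapResources(1, &vbo_res, 0);                      // take ownership from GL
    cudaGraphicsResourceGetMappedPointer((void**)&d_pos, &bytes, vbo_res);

    // ... launch an interaction/integration kernel on d_pos here ...

    cudaGraphicsUnmapResources(1, &vbo_res, 0);                    // hand the buffer back to GL
}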
ca5a2fd9f89bec039fc2729340a822a320ebb319.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" __global__ void Initialize_gpu(double state[],double m[], double l[],double I[],double Zetas[],int n); void printa(double a[], int n); #include <iostream> #include <stdio.h> void CudaInitialize(double m[], double l[], double I[], double x[], int n, double Zs[]) { int x_size = 2*n; int I_size = 3*n*3; int z_size = 26*6*n; double *d_x, *d_m, *d_l, *d_I, *d_zs; // Allocate and Load M and N to device memory hipMalloc(&d_x,x_size*sizeof(double)); hipMalloc(&d_m, n*sizeof(double)); hipMalloc(&d_l, n*sizeof(double)); hipMalloc(&d_I, I_size*sizeof(double)); hipMalloc(&d_zs, z_size*sizeof(double)); hipMemcpy(d_x, x, x_size*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_m, m, n*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_I, I, I_size*sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_l, l, n*sizeof(double), hipMemcpyHostToDevice); dim3 dimBlock(6, 6,1); dim3 dimGrid(n,1,1); hipLaunchKernelGGL(( Initialize_gpu), dim3(dimGrid), dim3(dimBlock), 0, 0, d_x, d_m, d_l, d_I, d_zs, n); hipMemcpy(Zs, d_zs, z_size*sizeof(double), hipMemcpyDeviceToHost); hipFree(d_x); hipFree(d_m); hipFree(d_l); hipFree(d_I); hipFree(d_zs); }
ca5a2fd9f89bec039fc2729340a822a320ebb319.cu
__global__ void Initialize_gpu(double state[],double m[], double l[],double I[],double Zetas[],int n); void printa(double a[], int n); #include <iostream> #include <stdio.h> void CudaInitialize(double m[], double l[], double I[], double x[], int n, double Zs[]) { int x_size = 2*n; int I_size = 3*n*3; int z_size = 26*6*n; double *d_x, *d_m, *d_l, *d_I, *d_zs; // Allocate and Load M and N to device memory cudaMalloc(&d_x,x_size*sizeof(double)); cudaMalloc(&d_m, n*sizeof(double)); cudaMalloc(&d_l, n*sizeof(double)); cudaMalloc(&d_I, I_size*sizeof(double)); cudaMalloc(&d_zs, z_size*sizeof(double)); cudaMemcpy(d_x, x, x_size*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_m, m, n*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_I, I, I_size*sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_l, l, n*sizeof(double), cudaMemcpyHostToDevice); dim3 dimBlock(6, 6,1); dim3 dimGrid(n,1,1); Initialize_gpu<<<dimGrid, dimBlock>>>(d_x, d_m, d_l, d_I, d_zs, n); cudaMemcpy(Zs, d_zs, z_size*sizeof(double), cudaMemcpyDeviceToHost); cudaFree(d_x); cudaFree(d_m); cudaFree(d_l); cudaFree(d_I); cudaFree(d_zs); }
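CudaInitialize above calls the runtime API without checking return codes. A common, optional pattern is to wrap each call in an error-checking macro; the CUDA_CHECK name below is an assumption, not something the file defines:

#include <cuda_runtime.h>
#include <stdio.h>
#include <stdlib.h>

// Optional error-checking wrapper; the file above calls the runtime API unchecked.
#define CUDA_CHECK(call)                                              \
    do {                                                              \
        cudaError_t err_ = (call);                                    \
        if (err_ != cudaSuccess) {                                    \
            fprintf(stderr, "%s:%d: %s\n", __FILE__, __LINE__,        \
                    cudaGetErrorString(err_));                        \
            exit(EXIT_FAILURE);                                       \
        }                                                             \
    } while (0)

// Usage: CUDA_CHECK(cudaMalloc(&d_x, x_size * sizeof(double)));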
9d286d89dccdd1204291d029ebbedcd210303acd.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> #include <stdlib.h> #include "utils.h" #define SIZE 1e6 #define THREADS 256 #define BLOCKS MIN(32, (SIZE + THREADS - 1)/ THREADS) #define PI 3.14159265 #define RECTS 1000000 //1e6 #define START 0.0 #define END PI __global__ void integration(double *x, double *dx, double *results) { __shared__ double cache[THREADS]; int tid = threadIdx.x + (blockIdx.x * blockDim.x); int cacheIndex = threadIdx.x; double acum = 0; while (tid < SIZE) { acum += sin( (*x) + (tid * (*dx)) ); tid += blockDim.x * gridDim.x; } cache[cacheIndex] = acum; __syncthreads(); int i = blockDim.x / 2; while (i > 0) { if (cacheIndex < i) { cache[cacheIndex] += cache[cacheIndex + i]; } __syncthreads(); i /= 2; } if (cacheIndex == 0) { results[blockIdx.x] = cache[cacheIndex]; } } int main(int argc, char* argv[]) { double x, dx, *results; double *d_x, *d_dx, *d_r; double ms; int i; x = START; dx = (END - START) / RECTS; results = (double*) malloc( BLOCKS * sizeof(double) ); hipMalloc( (void**) &d_x, sizeof(double)); hipMalloc( (void**) &d_dx, sizeof(double)); hipMalloc( (void**) &d_r, BLOCKS * sizeof(double) ); hipMemcpy(d_x, &x, sizeof(double), hipMemcpyHostToDevice); hipMemcpy(d_dx, &dx, sizeof(double), hipMemcpyHostToDevice); printf("Starting...\n"); ms = 0; for (i = 1; i <= N; i++) { start_timer(); hipLaunchKernelGGL(( integration), dim3(BLOCKS), dim3(THREADS), 0, 0, d_x, d_dx, d_r); ms += stop_timer(); } hipMemcpy(results, d_r, BLOCKS * sizeof(double), hipMemcpyDeviceToHost); double acum = 0; for (i = 0; i < BLOCKS; i++) { acum += results[i]; } printf("area = %.5lf\n", (acum * dx)); printf("avg time = %.5lf\n", (ms / N)); hipFree(d_x); hipFree(d_dx); hipFree(d_r); free(results); return 0; }
9d286d89dccdd1204291d029ebbedcd210303acd.cu
#include <stdio.h> #include <stdlib.h> #include "utils.h" #define SIZE 1e6 #define THREADS 256 #define BLOCKS MIN(32, (SIZE + THREADS - 1)/ THREADS) #define PI 3.14159265 #define RECTS 1000000 //1e6 #define START 0.0 #define END PI __global__ void integration(double *x, double *dx, double *results) { __shared__ double cache[THREADS]; int tid = threadIdx.x + (blockIdx.x * blockDim.x); int cacheIndex = threadIdx.x; double acum = 0; while (tid < SIZE) { acum += sin( (*x) + (tid * (*dx)) ); tid += blockDim.x * gridDim.x; } cache[cacheIndex] = acum; __syncthreads(); int i = blockDim.x / 2; while (i > 0) { if (cacheIndex < i) { cache[cacheIndex] += cache[cacheIndex + i]; } __syncthreads(); i /= 2; } if (cacheIndex == 0) { results[blockIdx.x] = cache[cacheIndex]; } } int main(int argc, char* argv[]) { double x, dx, *results; double *d_x, *d_dx, *d_r; double ms; int i; x = START; dx = (END - START) / RECTS; results = (double*) malloc( BLOCKS * sizeof(double) ); cudaMalloc( (void**) &d_x, sizeof(double)); cudaMalloc( (void**) &d_dx, sizeof(double)); cudaMalloc( (void**) &d_r, BLOCKS * sizeof(double) ); cudaMemcpy(d_x, &x, sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(d_dx, &dx, sizeof(double), cudaMemcpyHostToDevice); printf("Starting...\n"); ms = 0; for (i = 1; i <= N; i++) { start_timer(); integration<<<BLOCKS, THREADS>>> (d_x, d_dx, d_r); ms += stop_timer(); } cudaMemcpy(results, d_r, BLOCKS * sizeof(double), cudaMemcpyDeviceToHost); double acum = 0; for (i = 0; i < BLOCKS; i++) { acum += results[i]; } printf("area = %.5lf\n", (acum * dx)); printf("avg time = %.5lf\n", (ms / N)); cudaFree(d_x); cudaFree(d_dx); cudaFree(d_r); free(results); return 0; }
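The integration kernel above accumulates sin(x + tid * dx) over RECTS rectangles and reduces the per-thread partial sums in shared memory; the reported area should approach 2, the exact value of the integral of sin(x) on [0, pi]. A plain CPU reference for the same rectangle rule (hypothetical helper, useful for validating the GPU result):

#include <math.h>

// CPU reference for the rectangle-rule sum computed by the kernel above.
double integrate_sin_cpu(double start, double end, long rects) {
    double dx = (end - start) / rects;
    double acum = 0.0;
    for (long i = 0; i < rects; i++)
        acum += sin(start + i * dx);
    return acum * dx;   // ~2.0 for start = 0, end = pi, large rects
}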
ef03fb33133cff86a89f8a63413ef82729dc8da3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "includes.h" __global__ void ComputeAverageThetaVelocitiesKernel(double *Vtheta, double *VMed, int nsec, int nrad) { int i = threadIdx.x + blockDim.x*blockIdx.x; double moy = 0.0; if (i<nrad){ for (int j = 0; j < nsec; j++) moy += Vtheta[i*nsec + j]; VMed[i] = moy/(double)nsec; } }
ef03fb33133cff86a89f8a63413ef82729dc8da3.cu
#include "includes.h" __global__ void ComputeAverageThetaVelocitiesKernel(double *Vtheta, double *VMed, int nsec, int nrad) { int i = threadIdx.x + blockDim.x*blockIdx.x; double moy = 0.0; if (i<nrad){ for (int j = 0; j < nsec; j++) moy += Vtheta[i*nsec + j]; VMed[i] = moy/(double)nsec; } }
3109c3e5695f3171bba453215b483fdb0b87dc6e.hip
// !!! This is a file automatically generated by hipify!!!
/*
 * CUDA blur
 * Kevin Yuh, 2014
 * Revised by Nailen Matschke, 2016
 * Revised by Loko Kung, 2018
 */

#include "blur.cuh"

#include <cstdio>

#include <hip/hip_runtime.h>

#include "cuda_header.cuh"

CUDA_CALLABLE void cuda_blur_kernel_convolution(uint thread_index,
                                                const float* gpu_raw_data,
                                                const float* gpu_blur_v,
                                                float* gpu_out_data,
                                                const unsigned int n_frames,
                                                const unsigned int blur_v_size) {
    // Convolution for this thread_index (see the CPU implementation in
    // blur.cpp for reference). The first blur_v_size output samples only
    // have a partial input history available.
    if (thread_index < blur_v_size) {
        for (int j = 0; j <= thread_index; j++) {
            gpu_out_data[thread_index] +=
                gpu_raw_data[thread_index - j] * gpu_blur_v[j];
        }
    } else {
        for (int j = 0; j < blur_v_size; j++) {
            gpu_out_data[thread_index] +=
                gpu_raw_data[thread_index - j] * gpu_blur_v[j];
        }
    }
}

__global__ void cuda_blur_kernel(const float *gpu_raw_data,
                                 const float *gpu_blur_v,
                                 float *gpu_out_data,
                                 int n_frames, int blur_v_size) {
    // Compute the current thread index.
    uint thread_index = blockIdx.x * blockDim.x + threadIdx.x;

    // Grid-stride loop: each thread handles every (blockDim.x * gridDim.x)-th
    // frame so the whole signal is covered even when the grid is small.
    while (thread_index < n_frames) {
        // Do computation for this thread index
        cuda_blur_kernel_convolution(thread_index, gpu_raw_data,
                                     gpu_blur_v, gpu_out_data,
                                     n_frames, blur_v_size);

        // Advance to this thread's next frame.
        thread_index += blockDim.x * gridDim.x;
    }
}

float cuda_call_blur_kernel(const unsigned int blocks,
                            const unsigned int threads_per_block,
                            const float *raw_data,
                            const float *blur_v,
                            float *out_data,
                            const unsigned int n_frames,
                            const unsigned int blur_v_size) {
    // Use the CUDA machinery for recording time
    hipEvent_t start_gpu, stop_gpu;
    float time_milli = -1;
    hipEventCreate(&start_gpu);
    hipEventCreate(&stop_gpu);
    hipEventRecord(start_gpu);

    // Allocate GPU memory for the raw input data (either audio file data or
    // randomly generated data; n_frames floats) and copy raw_data into it.
    float* gpu_raw_data;
    hipMalloc(&gpu_raw_data, n_frames * sizeof(float));
    hipMemcpy(gpu_raw_data, raw_data, n_frames * sizeof(float),
              hipMemcpyHostToDevice);

    // Allocate GPU memory for the impulse signal (blur_v_size floats; global
    // memory is fine for now) and copy blur_v into it.
    float* gpu_blur_v;
    hipMalloc(&gpu_blur_v, blur_v_size * sizeof(float));
    hipMemcpy(gpu_blur_v, blur_v, blur_v_size * sizeof(float),
              hipMemcpyHostToDevice);

    // Allocate GPU memory for the output signal (n_frames floats) and zero it.
    // Note: pass the device pointer itself, not its address.
    float* gpu_out_data;
    hipMalloc(&gpu_out_data, n_frames * sizeof(float));
    hipMemset(gpu_out_data, 0, n_frames * sizeof(float));

    // Launch the kernel.
    hipLaunchKernelGGL((cuda_blur_kernel), dim3(blocks), dim3(threads_per_block), 0, 0,
                       gpu_raw_data, gpu_blur_v, gpu_out_data,
                       n_frames, blur_v_size);

    // Check for errors on kernel call
    hipError_t err = hipGetLastError();
    if (hipSuccess != err)
        fprintf(stderr, "Error %s\n", hipGetErrorString(err));
    else
        fprintf(stderr, "No kernel error detected\n");

    // Now that the kernel has finished, copy the output signal back from the
    // GPU to host memory. (We store this channel's result in out_data on the
    // host.)
    hipMemcpy(out_data, gpu_out_data, n_frames * sizeof(float),
              hipMemcpyDeviceToHost);

    // Free the GPU resources.
    hipFree(gpu_raw_data);
    hipFree(gpu_blur_v);
    hipFree(gpu_out_data);

    // Stop the recording timer and return the computation time
    hipEventRecord(stop_gpu);
    hipEventSynchronize(stop_gpu);
    hipEventElapsedTime(&time_milli, start_gpu, stop_gpu);
    return time_milli;
}
3109c3e5695f3171bba453215b483fdb0b87dc6e.cu
/*
 * CUDA blur
 * Kevin Yuh, 2014
 * Revised by Nailen Matschke, 2016
 * Revised by Loko Kung, 2018
 */

#include "blur.cuh"

#include <cstdio>

#include <cuda_runtime.h>

#include "cuda_header.cuh"

CUDA_CALLABLE void cuda_blur_kernel_convolution(uint thread_index,
                                                const float* gpu_raw_data,
                                                const float* gpu_blur_v,
                                                float* gpu_out_data,
                                                const unsigned int n_frames,
                                                const unsigned int blur_v_size) {
    // Convolution for this thread_index (see the CPU implementation in
    // blur.cpp for reference). The first blur_v_size output samples only
    // have a partial input history available.
    if (thread_index < blur_v_size) {
        for (int j = 0; j <= thread_index; j++) {
            gpu_out_data[thread_index] +=
                gpu_raw_data[thread_index - j] * gpu_blur_v[j];
        }
    } else {
        for (int j = 0; j < blur_v_size; j++) {
            gpu_out_data[thread_index] +=
                gpu_raw_data[thread_index - j] * gpu_blur_v[j];
        }
    }
}

__global__ void cuda_blur_kernel(const float *gpu_raw_data,
                                 const float *gpu_blur_v,
                                 float *gpu_out_data,
                                 int n_frames, int blur_v_size) {
    // Compute the current thread index.
    uint thread_index = blockIdx.x * blockDim.x + threadIdx.x;

    // Grid-stride loop: each thread handles every (blockDim.x * gridDim.x)-th
    // frame so the whole signal is covered even when the grid is small.
    while (thread_index < n_frames) {
        // Do computation for this thread index
        cuda_blur_kernel_convolution(thread_index, gpu_raw_data,
                                     gpu_blur_v, gpu_out_data,
                                     n_frames, blur_v_size);

        // Advance to this thread's next frame.
        thread_index += blockDim.x * gridDim.x;
    }
}

float cuda_call_blur_kernel(const unsigned int blocks,
                            const unsigned int threads_per_block,
                            const float *raw_data,
                            const float *blur_v,
                            float *out_data,
                            const unsigned int n_frames,
                            const unsigned int blur_v_size) {
    // Use the CUDA machinery for recording time
    cudaEvent_t start_gpu, stop_gpu;
    float time_milli = -1;
    cudaEventCreate(&start_gpu);
    cudaEventCreate(&stop_gpu);
    cudaEventRecord(start_gpu);

    // Allocate GPU memory for the raw input data (either audio file data or
    // randomly generated data; n_frames floats) and copy raw_data into it.
    float* gpu_raw_data;
    cudaMalloc(&gpu_raw_data, n_frames * sizeof(float));
    cudaMemcpy(gpu_raw_data, raw_data, n_frames * sizeof(float),
               cudaMemcpyHostToDevice);

    // Allocate GPU memory for the impulse signal (blur_v_size floats; global
    // memory is fine for now) and copy blur_v into it.
    float* gpu_blur_v;
    cudaMalloc(&gpu_blur_v, blur_v_size * sizeof(float));
    cudaMemcpy(gpu_blur_v, blur_v, blur_v_size * sizeof(float),
               cudaMemcpyHostToDevice);

    // Allocate GPU memory for the output signal (n_frames floats) and zero it.
    // Note: pass the device pointer itself, not its address.
    float* gpu_out_data;
    cudaMalloc(&gpu_out_data, n_frames * sizeof(float));
    cudaMemset(gpu_out_data, 0, n_frames * sizeof(float));

    // Launch the kernel.
    cuda_blur_kernel<<<blocks, threads_per_block>>>(gpu_raw_data, gpu_blur_v,
                                                    gpu_out_data, n_frames,
                                                    blur_v_size);

    // Check for errors on kernel call
    cudaError err = cudaGetLastError();
    if (cudaSuccess != err)
        fprintf(stderr, "Error %s\n", cudaGetErrorString(err));
    else
        fprintf(stderr, "No kernel error detected\n");

    // Now that the kernel has finished, copy the output signal back from the
    // GPU to host memory. (We store this channel's result in out_data on the
    // host.)
    cudaMemcpy(out_data, gpu_out_data, n_frames * sizeof(float),
               cudaMemcpyDeviceToHost);

    // Free the GPU resources.
    cudaFree(gpu_raw_data);
    cudaFree(gpu_blur_v);
    cudaFree(gpu_out_data);

    // Stop the recording timer and return the computation time
    cudaEventRecord(stop_gpu);
    cudaEventSynchronize(stop_gpu);
    cudaEventElapsedTime(&time_milli, start_gpu, stop_gpu);
    return time_milli;
}
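For context, a minimal host-side driver for the pair above might look like the sketch below. It assumes blur.cuh declares cuda_call_blur_kernel with the signature shown in the file; the signal length, impulse length, launch configuration, and the normalized box-filter impulse response are illustrative choices, not part of the original code.

// Hypothetical driver: random input, normalized box impulse response,
// timed GPU convolution via cuda_call_blur_kernel (declared in blur.cuh).
#include <cstdio>
#include <cstdlib>
#include <vector>

#include "blur.cuh"

int main() {
    const unsigned int n_frames = 1 << 20;   // illustrative signal length
    const unsigned int blur_v_size = 128;    // illustrative impulse length
    const unsigned int blocks = 512;
    const unsigned int threads_per_block = 256;

    std::vector<float> raw_data(n_frames), out_data(n_frames, 0.0f);
    std::vector<float> blur_v(blur_v_size, 1.0f / blur_v_size);  // box filter

    for (unsigned int i = 0; i < n_frames; ++i)
        raw_data[i] = static_cast<float>(rand()) / RAND_MAX;

    float ms = cuda_call_blur_kernel(blocks, threads_per_block,
                                     raw_data.data(), blur_v.data(),
                                     out_data.data(), n_frames, blur_v_size);

    printf("GPU blur took %f ms, out[0] = %f\n", ms, out_data[0]);
    return 0;
}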
7baeb80ddf5b3ecd2d42add7d96052b1c33b20f8.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <algorithm>
#include <vector>

#include "caffe/layers/lilac_layer.hpp"
#include <math.h>

namespace caffe {

template <typename Dtype>
__global__ void LilacForward(const int n, const Dtype* in, Dtype* out,
    Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > 0 ? in[index] : exp(in[index]) - 1.0;
  }
}

template <typename Dtype>
void LilacLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  //printf("forward relu\n");
  Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
  // NOLINT_NEXT_LINE(whitespace/operators)
  hipLaunchKernelGGL((LilacForward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
      count, bottom_data, top_data, negative_slope);
  CUDA_POST_KERNEL_CHECK;
  // << " count: " << count << " bottom_data: "
  //     << (unsigned long)bottom_data
  //     << " top_data: " << (unsigned long)top_data
  //     << " blocks: " << CAFFE_GET_BLOCKS(count)
  //     << " threads: " << CAFFE_CUDA_NUM_THREADS;
}

template <typename Dtype>
__global__ void LilacBackward(const int n, const Dtype* in_diff,
    const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    out_diff[index] = in_diff[index] * ((in_data[index] > 0)
        + (in_data[index] <= 0) * exp(in_data[index]));
  }
}

template <typename Dtype>
void LilacLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  // printf("backward relu\n");
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
    // NOLINT_NEXT_LINE(whitespace/operators)
    hipLaunchKernelGGL((LilacBackward<Dtype>), dim3(CAFFE_GET_BLOCKS(count)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0,
        count, top_diff, bottom_data, bottom_diff, negative_slope);
    CUDA_POST_KERNEL_CHECK;
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(LilacLayer);

}  // namespace caffe
7baeb80ddf5b3ecd2d42add7d96052b1c33b20f8.cu
#include <algorithm>
#include <vector>

#include "caffe/layers/lilac_layer.hpp"
#include <math.h>

namespace caffe {

template <typename Dtype>
__global__ void LilacForward(const int n, const Dtype* in, Dtype* out,
    Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    out[index] = in[index] > 0 ? in[index] : exp(in[index]) - 1.0;
  }
}

template <typename Dtype>
void LilacLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom,
    const vector<Blob<Dtype>*>& top) {
  const Dtype* bottom_data = bottom[0]->gpu_data();
  Dtype* top_data = top[0]->mutable_gpu_data();
  const int count = bottom[0]->count();
  //printf("forward relu\n");
  Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
  // NOLINT_NEXT_LINE(whitespace/operators)
  LilacForward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
      count, bottom_data, top_data, negative_slope);
  CUDA_POST_KERNEL_CHECK;
  // << " count: " << count << " bottom_data: "
  //     << (unsigned long)bottom_data
  //     << " top_data: " << (unsigned long)top_data
  //     << " blocks: " << CAFFE_GET_BLOCKS(count)
  //     << " threads: " << CAFFE_CUDA_NUM_THREADS;
}

template <typename Dtype>
__global__ void LilacBackward(const int n, const Dtype* in_diff,
    const Dtype* in_data, Dtype* out_diff, Dtype negative_slope) {
  CUDA_KERNEL_LOOP(index, n) {
    out_diff[index] = in_diff[index] * ((in_data[index] > 0)
        + (in_data[index] <= 0) * exp(in_data[index]));
  }
}

template <typename Dtype>
void LilacLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top,
    const vector<bool>& propagate_down,
    const vector<Blob<Dtype>*>& bottom) {
  // printf("backward relu\n");
  if (propagate_down[0]) {
    const Dtype* bottom_data = bottom[0]->gpu_data();
    const Dtype* top_diff = top[0]->gpu_diff();
    Dtype* bottom_diff = bottom[0]->mutable_gpu_diff();
    const int count = bottom[0]->count();
    Dtype negative_slope = this->layer_param_.relu_param().negative_slope();
    // NOLINT_NEXT_LINE(whitespace/operators)
    LilacBackward<Dtype><<<CAFFE_GET_BLOCKS(count), CAFFE_CUDA_NUM_THREADS>>>(
        count, top_diff, bottom_data, bottom_diff, negative_slope);
    CUDA_POST_KERNEL_CHECK;
  }
}

INSTANTIATE_LAYER_GPU_FUNCS(LilacLayer);

}  // namespace caffe
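As a side note (not part of the original Caffe layer), the element-wise map in LilacForward/LilacBackward is the ELU-style function y = x for x > 0 and y = exp(x) - 1 otherwise, with derivative 1 or exp(x); negative_slope is passed through but unused. A small CPU reference such as the sketch below, with hypothetical helper names, can be useful for unit-testing the GPU kernels against.

// Hypothetical CPU reference for checking the GPU kernels above.
#include <cmath>
#include <vector>

// Mirrors LilacForward: y = x (x > 0), exp(x) - 1 otherwise.
std::vector<float> lilac_forward_cpu(const std::vector<float>& in) {
    std::vector<float> out(in.size());
    for (size_t i = 0; i < in.size(); ++i)
        out[i] = in[i] > 0 ? in[i] : std::exp(in[i]) - 1.0f;
    return out;
}

// Mirrors LilacBackward: dx = dy * (1 if x > 0 else exp(x)).
std::vector<float> lilac_backward_cpu(const std::vector<float>& top_diff,
                                      const std::vector<float>& bottom_data) {
    std::vector<float> bottom_diff(bottom_data.size());
    for (size_t i = 0; i < bottom_data.size(); ++i)
        bottom_diff[i] = top_diff[i] *
            (bottom_data[i] > 0 ? 1.0f : std::exp(bottom_data[i]));
    return bottom_diff;
}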
763fc7c30e2df8733e76c74def9f6ae9f9fbb659.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <SiftFeatures.h> __global__ void Convolution(float* image,float* mask, ArrayImage* PyDoG, int maskR,int maskC, int imgR,int imgC, float* imgOut, int idxPyDoG) { int tid= threadIdx.x; int bid= blockIdx.x; int bDim=blockDim.x; int gDim=gridDim.x; int iImg=0; float aux=0; int pxlThrd = ceil((double)(imgC*imgR)/(gDim*bDim)); ////////numero de veces que caben ////////los hilos en la imagen. for(int i = 0; i <pxlThrd; ++i)///////////////////////////// Strike { ////////////////////////////////////// //////////////////////////////////////Calculo de indices iImg=(tid+(bDim*bid)) + (i*gDim*bDim); //// pixel en el que trabajara el hilo ////////////////////////////////////// ////////////////////////////////////// if(iImg < imgC*imgR){ int condition=maskC/2+imgC*(floor((double)maskC/2)); if (iImg-condition < 0 || ///condicion arriba iImg+condition > imgC*imgR || ///condicion abajo iImg%imgC < maskC/2 || ///condicion izquierda iImg%imgC > (imgC-1)-(maskC/2) ) ///condicion derecha { aux=0; }else{ int itMask = 0; int itImg=iImg-condition; for (int j = 0; j < maskR; ++j) { for (int h = 0; h < maskC; ++h) { aux+=image[itImg]*mask[itMask]; ++itMask; ++itImg; } itImg+=imgC-maskC; } } imgOut[iImg]=aux; aux=0; } } PyDoG[idxPyDoG].image=imgOut; } __global__ void LocateMaxMin(ArrayImage* PyDoG, int idxPyDoG , float * imgOut ,MinMax * mM, int maskC, int imgR,int imgC, int idxmM) { int tid= threadIdx.x; int bid= blockIdx.x; int bDim=blockDim.x; int gDim=gridDim.x; int iImg=0; int pxlThrd = ceil((double)(imgC*imgR)/(gDim*bDim)); ////////numero de veces que caben ////////los hilos en la imagen. for(int i = 0; i <pxlThrd; ++i)///////////////////////////// Strike { //int min=0; //int max=0; float value=0.0; //float compare =0.0; ////////////////////////////////////// //////////////////////////////////////Calculo de indices iImg=(tid+(bDim*bid)) + (i*gDim*bDim); //// pixel en el que trabajara el hilo ////////////////////////////////////// ////////////////////////////////////// if(iImg < imgC*imgR){ int condition=maskC/2+imgC*(floor((double)maskC/2)); if (iImg-condition < 0 || ///condicion arriba iImg+condition > imgC*imgR || ///condicion abajo iImg%imgC < maskC/2 || ///condicion izquierda iImg%imgC > (imgC-1)-(maskC/2) ) ///condicion derecha { imgOut[iImg]=0; } else{ value=PyDoG[idxPyDoG].image[iImg]; if(value > PyDoG[idxPyDoG-1].image[iImg-(1+imgC)] && value > PyDoG[idxPyDoG-1].image[iImg-imgC] && value > PyDoG[idxPyDoG-1].image[iImg-(imgC-1)] && value > PyDoG[idxPyDoG-1].image[iImg-1] && value > PyDoG[idxPyDoG-1].image[iImg] && value > PyDoG[idxPyDoG-1].image[iImg+1] && value > PyDoG[idxPyDoG-1].image[iImg+(imgC-1)] && value > PyDoG[idxPyDoG-1].image[iImg+imgC] && value > PyDoG[idxPyDoG-1].image[iImg+(1+imgC)] && value > PyDoG[idxPyDoG].image[iImg-(1+imgC)] && value > PyDoG[idxPyDoG].image[iImg-imgC] && value > PyDoG[idxPyDoG].image[iImg-(imgC-1)] && value > PyDoG[idxPyDoG].image[iImg-1] && value > PyDoG[idxPyDoG].image[iImg+1] && value > PyDoG[idxPyDoG].image[iImg+(imgC-1)] && value > PyDoG[idxPyDoG].image[iImg+imgC] && value > PyDoG[idxPyDoG].image[iImg+(1+imgC)] && value > PyDoG[idxPyDoG+1].image[iImg-(1+imgC)] && value > PyDoG[idxPyDoG+1].image[iImg-imgC] && value > PyDoG[idxPyDoG+1].image[iImg-(imgC-1)] && value > PyDoG[idxPyDoG+1].image[iImg-1] && value > PyDoG[idxPyDoG+1].image[iImg] && value > PyDoG[idxPyDoG+1].image[iImg+1] && value > PyDoG[idxPyDoG+1].image[iImg+(imgC-1)] && value > 
PyDoG[idxPyDoG+1].image[iImg+imgC] && value > PyDoG[idxPyDoG+1].image[iImg+(1+imgC)]) {///Max imgOut[iImg]=1; }else if(value < PyDoG[idxPyDoG-1].image[iImg-(1+imgC)] && value < PyDoG[idxPyDoG-1].image[iImg-imgC] && value < PyDoG[idxPyDoG-1].image[iImg-(imgC-1)] && value < PyDoG[idxPyDoG-1].image[iImg-1] && value < PyDoG[idxPyDoG-1].image[iImg] && value < PyDoG[idxPyDoG-1].image[iImg+1] && value < PyDoG[idxPyDoG-1].image[iImg+(imgC-1)] && value < PyDoG[idxPyDoG-1].image[iImg+imgC] && value < PyDoG[idxPyDoG-1].image[iImg+(1+imgC)] && value < PyDoG[idxPyDoG].image[iImg-(1+imgC)] && value < PyDoG[idxPyDoG].image[iImg-imgC] && value < PyDoG[idxPyDoG].image[iImg-(imgC-1)] && value < PyDoG[idxPyDoG].image[iImg-1] && value < PyDoG[idxPyDoG].image[iImg+1] && value < PyDoG[idxPyDoG].image[iImg+(imgC-1)] && value < PyDoG[idxPyDoG].image[iImg+imgC] && value < PyDoG[idxPyDoG].image[iImg+(1+imgC)] && value < PyDoG[idxPyDoG+1].image[iImg-(1+imgC)] && value < PyDoG[idxPyDoG+1].image[iImg-imgC] && value < PyDoG[idxPyDoG+1].image[iImg-(imgC-1)] && value < PyDoG[idxPyDoG+1].image[iImg-1] && value < PyDoG[idxPyDoG+1].image[iImg] && value < PyDoG[idxPyDoG+1].image[iImg+1] && value < PyDoG[idxPyDoG+1].image[iImg+(imgC-1)] && value < PyDoG[idxPyDoG+1].image[iImg+imgC] && value < PyDoG[idxPyDoG+1].image[iImg+(1+imgC)]){//Min imgOut[iImg]=1; } else { imgOut[iImg]=0; } } } } mM[idxmM].minMax=imgOut; } __global__ void RemoveOutlier(ArrayImage* PyDoG, MinMax * mM, int idxmM, int idxPyDoG, int imgR,int imgC ,float* auxOut) { int tid= threadIdx.x; int bid= blockIdx.x; int bDim=blockDim.x; int gDim=gridDim.x; int iImg=0; int pxlThrd = ceil((double)(imgC*imgR)/(gDim*bDim)); ////////numero de veces que caben ////////los hilos en la imagen. for(int i = 0; i <pxlThrd; ++i)///////////////////////////// Strike { ////////////////////////////////////// //////////////////////////////////////Calculo de indices iImg=(tid+(bDim*bid)) + (i*gDim*bDim); //// pixel en el que trabajara el hilo ////////////////////////////////////// ////////////////////////////////////// if(iImg < imgC*imgR){ if(mM[idxmM].minMax[iImg]>0 && fabs(PyDoG[idxPyDoG].image[iImg])> 0.024) { float d, dxx, dyy, dxy, tr, det; d = PyDoG[idxPyDoG].image[iImg]; dxx = PyDoG[idxPyDoG].image[iImg-imgC]+ PyDoG[idxPyDoG].image[iImg+imgC] - 2*d; dyy = PyDoG[idxPyDoG].image[iImg-1]+ PyDoG[idxPyDoG].image[iImg+1] - 2*d; dxy = (PyDoG[idxPyDoG].image[iImg-imgC-1] + PyDoG[idxPyDoG].image[iImg+1+imgC] - PyDoG[idxPyDoG].image[iImg+imgC-1] - PyDoG[idxPyDoG].image[iImg-imgC+1])/4.0; tr = dxx + dyy; det = dxx*dyy - dxy*dxy; /* if(det <= 0 ) mM[idxmM].minMax[iImg]=0; else if( (tr*tr/det) < 12.1){ mM[idxmM].minMax[iImg]=1; }else{ mM[idxmM].minMax[iImg]=0; }*/ if(det<0 || tr*tr/det > 7.2) { mM[idxmM].minMax[iImg]=0; } }else { mM[idxmM].minMax[iImg]=0; } auxOut[iImg]=mM[idxmM].minMax[iImg]; } } } __global__ void OriMag(ArrayImage* PyDoG, int idxPyDoG, int imgR,int imgC , ArrayImage* Mag, ArrayImage* Ori, int idxMagOri, float* MagAux, float* OriAux) { int tid= threadIdx.x; int bid= blockIdx.x; int bDim=blockDim.x; int gDim=gridDim.x; float dx,dy; int iImg=0; int pxlThrd = ceil((double)(imgC*imgR)/(gDim*bDim)); ////////numero de veces que caben ////////los hilos en la imagen. 
for(int i = 0; i <pxlThrd; ++i)///////////////////////////// Strike { ////////////////////////////////////// //////////////////////////////////////Calculo de indices iImg=(tid+(bDim*bid)) + (i*gDim*bDim); //// pixel en el que trabajara el hilo ////////////////////////////////////// ////////////////////////////////////// if(iImg < imgC*imgR){ int condition=1/2+imgC*(floor((double)1/2)); if (iImg-condition < 0 || ///condicion arriba iImg+condition > imgC*imgR || ///condicion abajo iImg%imgC < 1/2 || ///condicion izquierda iImg%imgC > (imgC-1)-(1/2) ) ///condicion derecha { OriAux[iImg]=0; MagAux[iImg]=0; } else{ dx=PyDoG[idxPyDoG].image[iImg+1]-PyDoG[idxPyDoG].image[iImg-1]; dy=PyDoG[idxPyDoG].image[iImg+imgC]-PyDoG[idxPyDoG].image[iImg-imgC]; MagAux[iImg]=sqrt(dx*dx + dy*dy); OriAux[iImg]=atan2(dy,dx); } } } Mag[idxMagOri].image= MagAux; Ori[idxMagOri].image= OriAux; } __global__ void KeyPoints(ArrayImage * Mag, ArrayImage * Ori, MinMax * mM , int idxMOmM, keyPoint * KP, float sigma, int imgR,int imgC, int octava ) { int tid= threadIdx.x; int bid= blockIdx.x; int bDim=blockDim.x; int gDim=gridDim.x; float o = 0, val=0; int x=0, y=0, octv=-1; int iImg=0; int pxlThrd = ceil((double)(imgC*imgR)/(gDim*bDim)); ////////numero de veces que caben ////////los hilos en la imagen. for(int i = 0; i <pxlThrd; ++i)///////////////////////////// Strike { ////////////////////////////////////// //////////////////////////////////////Calculo de indices iImg=(tid+(bDim*bid)) + (i*gDim*bDim); //// pixel en el que trabajara el hilo ////////////////////////////////////// ////////////////////////////////////// octv=-1; if(iImg < imgC*imgR ){ if(mM[idxMOmM].minMax[iImg]>0 ){ float histo[36]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; octv=octava; x=iImg%imgC; y=iImg/imgC; int idxMO= (iImg-5)-(5*imgC); float exp_denom = 2.0 * sigma * sigma; float w; int bin; for (int i = -5; i < 6; ++i) { for (int j = -5; j < 6; ++j) { w = exp( -( i*i + j*j ) / exp_denom ); bin =(Ori[idxMOmM].image[idxMO]<0)?round((double) (18*(6.283185307-Ori[idxMOmM].image[idxMO])/3.141592654)): round((double) (18*Ori[idxMOmM].image[idxMO]/3.141592654)); histo[bin]+= w*Mag[idxMOmM].image[idxMO]; ++idxMO; } idxMO=idxMO+imgC-11; } int idxH=0; float valMaxH = histo[0]; for (int i = 1; i < 36; ++i) { if(histo[i]>valMaxH){ idxH = i; valMaxH=histo[i]; } } //printf("%f\n", valMaxH); int l = (idxH == 0)? 35:idxH-1; int r = (idxH+1)%36; float bin_; bin_= idxH + ((0.5*(histo[l]-histo[r]))/(histo[l]-(2*histo[idxH])+histo[r])); bin_= ( bin_ < 0 )? 36 + bin_ : ( bin_ >= 36 )? 
bin_ - 36 : bin_; o=((360*bin_)/36);//-3.141592654; val=valMaxH; } else{ o=-1.0; x=-1; y=-1; octv=-1; } KP[iImg].orientacion=o; KP[iImg].x=x; KP[iImg].y=y; KP[iImg].octv=octv; KP[iImg].size=val; } } } __global__ void CountKeyPoint(MinMax * mM, int idxmM, int imgR, int imgC, int * numKeyP) { int tid= threadIdx.x; int bDim=blockDim.x; __shared__ int num; int iImg=0; int pxlThrd = ceil((double)(imgC*imgR)/bDim); ////////numero de veces que caben if(tid==0) num=0; __syncthreads(); for(int i = 0; i < pxlThrd; ++i)///////////////////////////// Strike { iImg= tid+(i*bDim); if(iImg < imgC*imgR && mM[idxmM].minMax[iImg]>0){ atomicAdd(&num,1); } } numKeyP[0]=num; } void MaskGenerator(double sigma, int size,Mat mask){//Generate Gaussian Kernel Mat aux = getGaussianKernel(size,sigma,CV_32F); Mat aux_t; transpose(aux,aux_t); mask=aux*aux_t; } int ResizeImage(Mat image,vector<Mat>& images, int octvs){ images.push_back(image); for(int i=0; i<octvs-1; ++i) { Mat aux = images[i]; resize(aux,aux,Size(images[i].cols/2,images[i].rows/2)); images.push_back(aux); } return 0; } int PyramidKDoG(vector<Mat> & PyKDoG, int octvs, int intvls){ vector<double> sig; double sigma =sqrt(2.0f); vector<Mat> PyGauss; Mat resizeI; int size = 11;//size of gaussian mask Mat mask=Mat::ones(size,size,CV_32F); MaskGenerator(1,size,mask); PyGauss.push_back(mask); for(int i=1; i<intvls+3; ++i){ Mat aux=Mat::ones(size,size,CV_32F); double sigmaf=sqrt(pow(2.0,2.0/intvls)-1) * sigma; sigma= pow(2.0,1.0/ intvls ) * sigma; MaskGenerator(sigmaf,size,aux); PyGauss.push_back(aux); } ////////////////////////////// ///////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Resta de Gausianas for(int i=0; i<intvls+2; ++i){ Mat aux=Mat::ones(size,size,CV_32F); subtract(PyGauss[i+1],PyGauss[i],aux); PyKDoG.push_back(aux); } /////////////////////////////////////////////////////////////////////////////////////// return 0; } int foundIndexesMaxMin(float* minMax,vector<int*> & idxMinMax, int count ) { vector<int> idxmM; for (int c = 0; c < count; ++c) { if (minMax[c]==0.0) { idxmM.push_back(c); //cout<<c<<endl; } } idxMinMax.push_back(idxmM.data()); return 0; } int SiftFeatures(Mat Image, vector<Mat> PyDoG,Mat I){ const int intvls = 3; int octvs; //hipError_t e; octvs = log( min( Image.rows, Image.cols ) ) / log(2) - 2; vector<Mat> PyKDoG; vector<Mat> images; ArrayImage * pyDoG; MinMax * minMax; int mMidx=1; int idxPyDoG=0; hipFuncSetCacheConfig(Convolution,hipFuncCachePreferL1); hipFuncSetCacheConfig(LocateMaxMin,hipFuncCachePreferL1); hipFuncSetCacheConfig(RemoveOutlier,hipFuncCachePreferL1); hipFuncSetCacheConfig(OriMag,hipFuncCachePreferL1); hipFuncSetCacheConfig(KeyPoints,hipFuncCachePreferL1); hipEvent_t start, stop; hipEventCreate(&start); hipEventCreate(&stop); PyramidKDoG( PyKDoG,octvs,intvls); ResizeImage(Image,images,octvs); //////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Reservo Memoria GPU hipMalloc(&pyDoG,sizeof(ArrayImage)*images.size()*PyKDoG.size()); hipMalloc(&minMax,sizeof(MinMax)*intvls*images.size()); //cout<<hipGetErrorString(e)<<" hipMalloc"<<endl; hipEventRecord(start, 0); for (int i = 0; i < images.size() ; ++i) { float * img_D; int sizeImage = images[i].rows*images[i].cols; //////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Reservo Memoria GPU 
hipMalloc(&img_D,sizeof(float)*sizeImage);///imagenes //cout<<hipGetErrorString(e)<<" hipMalloc"<<endl; //////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Copio Memoria GPU hipMemcpy(img_D,images[i].ptr<float>(),sizeof(float)*sizeImage,hipMemcpyHostToDevice); //cout<<hipGetErrorString(e)<<" cudaMemCopyHD"<<endl; int imgBlocks= ceil((double) images[i].cols/BW); //////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////Convolucion de mascara con imagen /////////////////////////////////////////////////////////////////////Una Octava or ciclo for (int m = 0; m < PyKDoG.size(); ++m){ float * pkDoG_D; float * out_D; //float * out= new float[sizeImage]; int sizeMask=PyKDoG[m].rows*PyKDoG[m].cols; //////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Reservo Memoria GPU hipMalloc(&pkDoG_D,sizeof(float)*sizeMask);//mascaras //cout<<hipGetErrorString(e)<<" cudaMalloc________Mask "<<endl; hipMalloc(&out_D,sizeof(float)*sizeImage); //cout<<hipGetErrorString(e)<<" cudaMalloc________Mask"<<endl; //////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Copio Memoria GPU hipMemcpy(pkDoG_D,PyKDoG[m].ptr<float>(),sizeof(float)*sizeMask,hipMemcpyHostToDevice); //cout<<hipGetErrorString(e)<<" cudaMemCopyHD________Mask"<<endl; //////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Lanzo Kernel hipLaunchKernelGGL(( Convolution), dim3(imgBlocks),dim3(1024), 0, 0, img_D,pkDoG_D,pyDoG,PyKDoG[m].rows,PyKDoG[m].cols,images[i].rows,images[i].cols,out_D,idxPyDoG); //hipDeviceSynchronize(); ++idxPyDoG; hipFree(pkDoG_D); //hipMemcpy(out,out_D,sizeof(float)*sizeImage,hipMemcpyDeviceToHost); //cout<<hipGetErrorString(e)<<" cudaMemCopyDH________Mask"<<endl; //Mat image_out(images[i].rows,images[i].cols,CV_32F,out); //cout<<image_out<<endl; //imshow("PyDoG",image_out); //waitKey(0); //destroyAllWindows(); //delete(out); //hipFree(out_D); } hipFree(img_D); //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// } int maskC =PyKDoG[0].cols; int idxmM=0; for (int i = 0; i <images.size() ; ++i) { int sizeImage = images[i].rows*images[i].cols; int imgBlocks= ceil((double) images[i].cols/BW); //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////Busqueda de MinMax /////////////////////////////////////////////////////////////////////Una Octava or ciclo int m=0; for(m = mMidx; m < mMidx+intvls; ++m){ float * out_D; //float * out = new float[sizeImage]; //////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Reservo Memoria GPU hipMalloc(&out_D,sizeof(float)*sizeImage); //cout<<hipGetErrorString(e)<<" cudaMalloc________Mask"<<endl; //////////////////////////////////////////////////////////////////////////////////////// 
/////////////////////////////////////////////////////////////////////Lanzo Kernel ///////entrega ya los puntos descartanbdo los de bajo contraste hipLaunchKernelGGL(( LocateMaxMin), dim3(imgBlocks),dim3(1024), 0, 0, pyDoG,m,out_D,minMax,maskC,images[i].rows,images[i].cols,idxmM); ++idxmM; //hipDeviceSynchronize(); //hipMemcpy(out,out_D,sizeof(float)*sizeImage,hipMemcpyDeviceToHost); //cout<<hipGetErrorString(e)<<" cudaMemCopyDH________Mask"<<endl; //Mat image_out(images[i].rows,images[i].cols,CV_32F,out); //imshow("MinMax",image_out); //waitKey(0); //destroyAllWindows(); //delete(out); } mMidx=m+2; //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Remover outliers idxPyDoG=1, idxmM=0; for(int i = 0; i< images.size(); ++i ) { float* out_D; int sizeImage = images[i].rows*images[i].cols; int imgBlocks= ceil((double) images[i].cols/BW); hipMalloc(&out_D,sizeof(float)*sizeImage); //float * out = new float[sizeImage]; for (int j = 0; j < intvls; ++j) { hipLaunchKernelGGL(( RemoveOutlier), dim3(imgBlocks),dim3(1024), 0, 0, pyDoG,minMax,idxmM,idxPyDoG, images[i].rows,images[i].cols,out_D); //hipMemcpy(out,out_D,sizeof(float)*sizeImage,hipMemcpyDeviceToHost); //Mat image_out(images[i].rows,images[i].cols,CV_32F,out); //imshow("MinMax Filtrados",image_out); //waitKey(0); //destroyAllWindows(); ++idxmM; ++idxPyDoG; } idxPyDoG+=2; //delete(out); hipFree(out_D); } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Calculo de Orientaciones y magnitud en DoG ArrayImage * Mag; ArrayImage * Ori; hipMalloc(&Mag,sizeof(ArrayImage)*intvls*images.size()); hipMalloc(&Ori,sizeof(ArrayImage)*intvls*images.size()); idxPyDoG=1; int idxMagOri=0; for(int i = 0; i< images.size(); ++i ) { float * MagAux; float * OriAux; int sizeImage = images[i].rows*images[i].cols; int imgBlocks= ceil((double) images[i].cols/BW); hipMalloc(&MagAux,sizeof(float)*sizeImage); hipMalloc(&OriAux,sizeof(float)*sizeImage); //float * out = new float[sizeImage]; for (int j = 0; j < intvls; ++j) { hipLaunchKernelGGL(( OriMag), dim3(imgBlocks),dim3(1024), 0, 0, pyDoG,idxPyDoG, images[i].rows,images[i].cols,Mag,Ori,idxMagOri,MagAux,OriAux); //hipMemcpy(out,OriAux,sizeof(float)*sizeImage,hipMemcpyDeviceToHost); //Mat image_out(images[i].rows,images[i].cols,CV_32F,out); //imshow("tesuto",image_out); //waitKey(0); //destroyAllWindows(); ++idxMagOri; ++idxPyDoG; } idxPyDoG+=2; //delete(out); //hipFree(MagAux); //hipFree(OriAux); } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Obtener orientacion de keypoints vector<KeyPoint> KPoints; idxmM=0; for(int i = 0; i< images.size(); ++i ) { float sigma=sqrt(2.0f); int imgBlocks= ceil((double) 
images[i].cols/BW); keyPoint * KP; //keyPoint * KP_host = new keyPoint[images[i].rows*images[i].cols]; hipMalloc(&KP,sizeof(keyPoint)*images[i].rows*images[i].cols); for (int j = 0; j < intvls; ++j) { hipLaunchKernelGGL(( KeyPoints), dim3(imgBlocks),dim3(128), 0, 0, Mag, Ori, minMax , idxmM, KP, sigma, images[i].rows,images[i].cols, i ); // hipMemcpy(KP_host,KP,sizeof(keyPoint)*images[i].rows*images[i].cols,hipMemcpyDeviceToHost); sigma= pow(2.0,1.0/ intvls ) * sigma; ++idxmM; /* for(int k=0; k<(images[i].rows*images[i].cols); ++k){ if( !(KP_host[k].octv <0) ){ //cout<<idxmM<<endl; if (i>0) { KP_host[k].x*=pow(2,i); KP_host[k].y*=pow(2,i); } KeyPoint aux(KP_host[k].x,KP_host[k].y,KP_host[k].size,KP_host[k].orientacion ,0,KP_host[k].octv); //cout<<KP_host[k].size<<endl; KPoints.push_back(aux); } }*/ } //delete(KP_host); hipFree(KP); } hipEventRecord(stop, 0); hipEventSynchronize(stop); float elapsedTime; hipEventElapsedTime(&elapsedTime,start, stop); cout<< "Tiempo total "<<elapsedTime << " en milseg"<<endl; hipEventDestroy(start); hipEventDestroy(stop); //cout<<KPoints.size()<<endl; /* Mat out; drawKeypoints(I,KPoints,out); imshow("Puntos Caracteristicos SIFT",out); waitKey(0); destroyAllWindows(); /*Ptr<DescriptorExtractor> featureExtractor = DescriptorExtractor::create("SIFT"); Mat descriptors; featureExtractor->compute(I, KPoints, descriptors); Mat outputImage; Scalar keypointColor = Scalar(255, 0, 0); drawKeypoints(I, KPoints, outputImage, keypointColor, DrawMatchesFlags::DEFAULT); imshow("test",outputImage); waitKey(0); destroyAllWindows();*/ hipFree(Ori); hipFree(Mag); hipFree(pyDoG); hipFree(minMax); return 0; }
763fc7c30e2df8733e76c74def9f6ae9f9fbb659.cu
#include <SiftFeatures.h> __global__ void Convolution(float* image,float* mask, ArrayImage* PyDoG, int maskR,int maskC, int imgR,int imgC, float* imgOut, int idxPyDoG) { int tid= threadIdx.x; int bid= blockIdx.x; int bDim=blockDim.x; int gDim=gridDim.x; int iImg=0; float aux=0; int pxlThrd = ceil((double)(imgC*imgR)/(gDim*bDim)); ////////numero de veces que caben ////////los hilos en la imagen. for(int i = 0; i <pxlThrd; ++i)///////////////////////////// Strike { ////////////////////////////////////// //////////////////////////////////////Calculo de indices iImg=(tid+(bDim*bid)) + (i*gDim*bDim); //// pixel en el que trabajara el hilo ////////////////////////////////////// ////////////////////////////////////// if(iImg < imgC*imgR){ int condition=maskC/2+imgC*(floor((double)maskC/2)); if (iImg-condition < 0 || ///condicion arriba iImg+condition > imgC*imgR || ///condicion abajo iImg%imgC < maskC/2 || ///condicion izquierda iImg%imgC > (imgC-1)-(maskC/2) ) ///condicion derecha { aux=0; }else{ int itMask = 0; int itImg=iImg-condition; for (int j = 0; j < maskR; ++j) { for (int h = 0; h < maskC; ++h) { aux+=image[itImg]*mask[itMask]; ++itMask; ++itImg; } itImg+=imgC-maskC; } } imgOut[iImg]=aux; aux=0; } } PyDoG[idxPyDoG].image=imgOut; } __global__ void LocateMaxMin(ArrayImage* PyDoG, int idxPyDoG , float * imgOut ,MinMax * mM, int maskC, int imgR,int imgC, int idxmM) { int tid= threadIdx.x; int bid= blockIdx.x; int bDim=blockDim.x; int gDim=gridDim.x; int iImg=0; int pxlThrd = ceil((double)(imgC*imgR)/(gDim*bDim)); ////////numero de veces que caben ////////los hilos en la imagen. for(int i = 0; i <pxlThrd; ++i)///////////////////////////// Strike { //int min=0; //int max=0; float value=0.0; //float compare =0.0; ////////////////////////////////////// //////////////////////////////////////Calculo de indices iImg=(tid+(bDim*bid)) + (i*gDim*bDim); //// pixel en el que trabajara el hilo ////////////////////////////////////// ////////////////////////////////////// if(iImg < imgC*imgR){ int condition=maskC/2+imgC*(floor((double)maskC/2)); if (iImg-condition < 0 || ///condicion arriba iImg+condition > imgC*imgR || ///condicion abajo iImg%imgC < maskC/2 || ///condicion izquierda iImg%imgC > (imgC-1)-(maskC/2) ) ///condicion derecha { imgOut[iImg]=0; } else{ value=PyDoG[idxPyDoG].image[iImg]; if(value > PyDoG[idxPyDoG-1].image[iImg-(1+imgC)] && value > PyDoG[idxPyDoG-1].image[iImg-imgC] && value > PyDoG[idxPyDoG-1].image[iImg-(imgC-1)] && value > PyDoG[idxPyDoG-1].image[iImg-1] && value > PyDoG[idxPyDoG-1].image[iImg] && value > PyDoG[idxPyDoG-1].image[iImg+1] && value > PyDoG[idxPyDoG-1].image[iImg+(imgC-1)] && value > PyDoG[idxPyDoG-1].image[iImg+imgC] && value > PyDoG[idxPyDoG-1].image[iImg+(1+imgC)] && value > PyDoG[idxPyDoG].image[iImg-(1+imgC)] && value > PyDoG[idxPyDoG].image[iImg-imgC] && value > PyDoG[idxPyDoG].image[iImg-(imgC-1)] && value > PyDoG[idxPyDoG].image[iImg-1] && value > PyDoG[idxPyDoG].image[iImg+1] && value > PyDoG[idxPyDoG].image[iImg+(imgC-1)] && value > PyDoG[idxPyDoG].image[iImg+imgC] && value > PyDoG[idxPyDoG].image[iImg+(1+imgC)] && value > PyDoG[idxPyDoG+1].image[iImg-(1+imgC)] && value > PyDoG[idxPyDoG+1].image[iImg-imgC] && value > PyDoG[idxPyDoG+1].image[iImg-(imgC-1)] && value > PyDoG[idxPyDoG+1].image[iImg-1] && value > PyDoG[idxPyDoG+1].image[iImg] && value > PyDoG[idxPyDoG+1].image[iImg+1] && value > PyDoG[idxPyDoG+1].image[iImg+(imgC-1)] && value > PyDoG[idxPyDoG+1].image[iImg+imgC] && value > PyDoG[idxPyDoG+1].image[iImg+(1+imgC)]) {///Max imgOut[iImg]=1; }else 
if(value < PyDoG[idxPyDoG-1].image[iImg-(1+imgC)] && value < PyDoG[idxPyDoG-1].image[iImg-imgC] && value < PyDoG[idxPyDoG-1].image[iImg-(imgC-1)] && value < PyDoG[idxPyDoG-1].image[iImg-1] && value < PyDoG[idxPyDoG-1].image[iImg] && value < PyDoG[idxPyDoG-1].image[iImg+1] && value < PyDoG[idxPyDoG-1].image[iImg+(imgC-1)] && value < PyDoG[idxPyDoG-1].image[iImg+imgC] && value < PyDoG[idxPyDoG-1].image[iImg+(1+imgC)] && value < PyDoG[idxPyDoG].image[iImg-(1+imgC)] && value < PyDoG[idxPyDoG].image[iImg-imgC] && value < PyDoG[idxPyDoG].image[iImg-(imgC-1)] && value < PyDoG[idxPyDoG].image[iImg-1] && value < PyDoG[idxPyDoG].image[iImg+1] && value < PyDoG[idxPyDoG].image[iImg+(imgC-1)] && value < PyDoG[idxPyDoG].image[iImg+imgC] && value < PyDoG[idxPyDoG].image[iImg+(1+imgC)] && value < PyDoG[idxPyDoG+1].image[iImg-(1+imgC)] && value < PyDoG[idxPyDoG+1].image[iImg-imgC] && value < PyDoG[idxPyDoG+1].image[iImg-(imgC-1)] && value < PyDoG[idxPyDoG+1].image[iImg-1] && value < PyDoG[idxPyDoG+1].image[iImg] && value < PyDoG[idxPyDoG+1].image[iImg+1] && value < PyDoG[idxPyDoG+1].image[iImg+(imgC-1)] && value < PyDoG[idxPyDoG+1].image[iImg+imgC] && value < PyDoG[idxPyDoG+1].image[iImg+(1+imgC)]){//Min imgOut[iImg]=1; } else { imgOut[iImg]=0; } } } } mM[idxmM].minMax=imgOut; } __global__ void RemoveOutlier(ArrayImage* PyDoG, MinMax * mM, int idxmM, int idxPyDoG, int imgR,int imgC ,float* auxOut) { int tid= threadIdx.x; int bid= blockIdx.x; int bDim=blockDim.x; int gDim=gridDim.x; int iImg=0; int pxlThrd = ceil((double)(imgC*imgR)/(gDim*bDim)); ////////numero de veces que caben ////////los hilos en la imagen. for(int i = 0; i <pxlThrd; ++i)///////////////////////////// Strike { ////////////////////////////////////// //////////////////////////////////////Calculo de indices iImg=(tid+(bDim*bid)) + (i*gDim*bDim); //// pixel en el que trabajara el hilo ////////////////////////////////////// ////////////////////////////////////// if(iImg < imgC*imgR){ if(mM[idxmM].minMax[iImg]>0 && fabs(PyDoG[idxPyDoG].image[iImg])> 0.024) { float d, dxx, dyy, dxy, tr, det; d = PyDoG[idxPyDoG].image[iImg]; dxx = PyDoG[idxPyDoG].image[iImg-imgC]+ PyDoG[idxPyDoG].image[iImg+imgC] - 2*d; dyy = PyDoG[idxPyDoG].image[iImg-1]+ PyDoG[idxPyDoG].image[iImg+1] - 2*d; dxy = (PyDoG[idxPyDoG].image[iImg-imgC-1] + PyDoG[idxPyDoG].image[iImg+1+imgC] - PyDoG[idxPyDoG].image[iImg+imgC-1] - PyDoG[idxPyDoG].image[iImg-imgC+1])/4.0; tr = dxx + dyy; det = dxx*dyy - dxy*dxy; /* if(det <= 0 ) mM[idxmM].minMax[iImg]=0; else if( (tr*tr/det) < 12.1){ mM[idxmM].minMax[iImg]=1; }else{ mM[idxmM].minMax[iImg]=0; }*/ if(det<0 || tr*tr/det > 7.2) { mM[idxmM].minMax[iImg]=0; } }else { mM[idxmM].minMax[iImg]=0; } auxOut[iImg]=mM[idxmM].minMax[iImg]; } } } __global__ void OriMag(ArrayImage* PyDoG, int idxPyDoG, int imgR,int imgC , ArrayImage* Mag, ArrayImage* Ori, int idxMagOri, float* MagAux, float* OriAux) { int tid= threadIdx.x; int bid= blockIdx.x; int bDim=blockDim.x; int gDim=gridDim.x; float dx,dy; int iImg=0; int pxlThrd = ceil((double)(imgC*imgR)/(gDim*bDim)); ////////numero de veces que caben ////////los hilos en la imagen. 
for(int i = 0; i <pxlThrd; ++i)///////////////////////////// Strike { ////////////////////////////////////// //////////////////////////////////////Calculo de indices iImg=(tid+(bDim*bid)) + (i*gDim*bDim); //// pixel en el que trabajara el hilo ////////////////////////////////////// ////////////////////////////////////// if(iImg < imgC*imgR){ int condition=1/2+imgC*(floor((double)1/2)); if (iImg-condition < 0 || ///condicion arriba iImg+condition > imgC*imgR || ///condicion abajo iImg%imgC < 1/2 || ///condicion izquierda iImg%imgC > (imgC-1)-(1/2) ) ///condicion derecha { OriAux[iImg]=0; MagAux[iImg]=0; } else{ dx=PyDoG[idxPyDoG].image[iImg+1]-PyDoG[idxPyDoG].image[iImg-1]; dy=PyDoG[idxPyDoG].image[iImg+imgC]-PyDoG[idxPyDoG].image[iImg-imgC]; MagAux[iImg]=sqrt(dx*dx + dy*dy); OriAux[iImg]=atan2(dy,dx); } } } Mag[idxMagOri].image= MagAux; Ori[idxMagOri].image= OriAux; } __global__ void KeyPoints(ArrayImage * Mag, ArrayImage * Ori, MinMax * mM , int idxMOmM, keyPoint * KP, float sigma, int imgR,int imgC, int octava ) { int tid= threadIdx.x; int bid= blockIdx.x; int bDim=blockDim.x; int gDim=gridDim.x; float o = 0, val=0; int x=0, y=0, octv=-1; int iImg=0; int pxlThrd = ceil((double)(imgC*imgR)/(gDim*bDim)); ////////numero de veces que caben ////////los hilos en la imagen. for(int i = 0; i <pxlThrd; ++i)///////////////////////////// Strike { ////////////////////////////////////// //////////////////////////////////////Calculo de indices iImg=(tid+(bDim*bid)) + (i*gDim*bDim); //// pixel en el que trabajara el hilo ////////////////////////////////////// ////////////////////////////////////// octv=-1; if(iImg < imgC*imgR ){ if(mM[idxMOmM].minMax[iImg]>0 ){ float histo[36]={0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; octv=octava; x=iImg%imgC; y=iImg/imgC; int idxMO= (iImg-5)-(5*imgC); float exp_denom = 2.0 * sigma * sigma; float w; int bin; for (int i = -5; i < 6; ++i) { for (int j = -5; j < 6; ++j) { w = exp( -( i*i + j*j ) / exp_denom ); bin =(Ori[idxMOmM].image[idxMO]<0)?round((double) (18*(6.283185307-Ori[idxMOmM].image[idxMO])/3.141592654)): round((double) (18*Ori[idxMOmM].image[idxMO]/3.141592654)); histo[bin]+= w*Mag[idxMOmM].image[idxMO]; ++idxMO; } idxMO=idxMO+imgC-11; } int idxH=0; float valMaxH = histo[0]; for (int i = 1; i < 36; ++i) { if(histo[i]>valMaxH){ idxH = i; valMaxH=histo[i]; } } //printf("%f\n", valMaxH); int l = (idxH == 0)? 35:idxH-1; int r = (idxH+1)%36; float bin_; bin_= idxH + ((0.5*(histo[l]-histo[r]))/(histo[l]-(2*histo[idxH])+histo[r])); bin_= ( bin_ < 0 )? 36 + bin_ : ( bin_ >= 36 )? 
bin_ - 36 : bin_; o=((360*bin_)/36);//-3.141592654; val=valMaxH; } else{ o=-1.0; x=-1; y=-1; octv=-1; } KP[iImg].orientacion=o; KP[iImg].x=x; KP[iImg].y=y; KP[iImg].octv=octv; KP[iImg].size=val; } } } __global__ void CountKeyPoint(MinMax * mM, int idxmM, int imgR, int imgC, int * numKeyP) { int tid= threadIdx.x; int bDim=blockDim.x; __shared__ int num; int iImg=0; int pxlThrd = ceil((double)(imgC*imgR)/bDim); ////////numero de veces que caben if(tid==0) num=0; __syncthreads(); for(int i = 0; i < pxlThrd; ++i)///////////////////////////// Strike { iImg= tid+(i*bDim); if(iImg < imgC*imgR && mM[idxmM].minMax[iImg]>0){ atomicAdd(&num,1); } } numKeyP[0]=num; } void MaskGenerator(double sigma, int size,Mat mask){//Generate Gaussian Kernel Mat aux = getGaussianKernel(size,sigma,CV_32F); Mat aux_t; transpose(aux,aux_t); mask=aux*aux_t; } int ResizeImage(Mat image,vector<Mat>& images, int octvs){ images.push_back(image); for(int i=0; i<octvs-1; ++i) { Mat aux = images[i]; resize(aux,aux,Size(images[i].cols/2,images[i].rows/2)); images.push_back(aux); } return 0; } int PyramidKDoG(vector<Mat> & PyKDoG, int octvs, int intvls){ vector<double> sig; double sigma =sqrt(2.0f); vector<Mat> PyGauss; Mat resizeI; int size = 11;//size of gaussian mask Mat mask=Mat::ones(size,size,CV_32F); MaskGenerator(1,size,mask); PyGauss.push_back(mask); for(int i=1; i<intvls+3; ++i){ Mat aux=Mat::ones(size,size,CV_32F); double sigmaf=sqrt(pow(2.0,2.0/intvls)-1) * sigma; sigma= pow(2.0,1.0/ intvls ) * sigma; MaskGenerator(sigmaf,size,aux); PyGauss.push_back(aux); } ////////////////////////////// ///////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Resta de Gausianas for(int i=0; i<intvls+2; ++i){ Mat aux=Mat::ones(size,size,CV_32F); subtract(PyGauss[i+1],PyGauss[i],aux); PyKDoG.push_back(aux); } /////////////////////////////////////////////////////////////////////////////////////// return 0; } int foundIndexesMaxMin(float* minMax,vector<int*> & idxMinMax, int count ) { vector<int> idxmM; for (int c = 0; c < count; ++c) { if (minMax[c]==0.0) { idxmM.push_back(c); //cout<<c<<endl; } } idxMinMax.push_back(idxmM.data()); return 0; } int SiftFeatures(Mat Image, vector<Mat> PyDoG,Mat I){ const int intvls = 3; int octvs; //cudaError_t e; octvs = log( min( Image.rows, Image.cols ) ) / log(2) - 2; vector<Mat> PyKDoG; vector<Mat> images; ArrayImage * pyDoG; MinMax * minMax; int mMidx=1; int idxPyDoG=0; cudaFuncSetCacheConfig(Convolution,cudaFuncCachePreferL1); cudaFuncSetCacheConfig(LocateMaxMin,cudaFuncCachePreferL1); cudaFuncSetCacheConfig(RemoveOutlier,cudaFuncCachePreferL1); cudaFuncSetCacheConfig(OriMag,cudaFuncCachePreferL1); cudaFuncSetCacheConfig(KeyPoints,cudaFuncCachePreferL1); cudaEvent_t start, stop; cudaEventCreate(&start); cudaEventCreate(&stop); PyramidKDoG( PyKDoG,octvs,intvls); ResizeImage(Image,images,octvs); //////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Reservo Memoria GPU cudaMalloc(&pyDoG,sizeof(ArrayImage)*images.size()*PyKDoG.size()); cudaMalloc(&minMax,sizeof(MinMax)*intvls*images.size()); //cout<<cudaGetErrorString(e)<<" cudaMalloc"<<endl; cudaEventRecord(start, 0); for (int i = 0; i < images.size() ; ++i) { float * img_D; int sizeImage = images[i].rows*images[i].cols; //////////////////////////////////////////////////////////////////////////////////////// 
/////////////////////////////////////////////////////////////////////Reservo Memoria GPU cudaMalloc(&img_D,sizeof(float)*sizeImage);///imagenes //cout<<cudaGetErrorString(e)<<" cudaMalloc"<<endl; //////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Copio Memoria GPU cudaMemcpy(img_D,images[i].ptr<float>(),sizeof(float)*sizeImage,cudaMemcpyHostToDevice); //cout<<cudaGetErrorString(e)<<" cudaMemCopyHD"<<endl; int imgBlocks= ceil((double) images[i].cols/BW); //////////////////////////////////////////////////////////////////////////////////////// ///////////////////////////////////////////////////////Convolucion de mascara con imagen /////////////////////////////////////////////////////////////////////Una Octava or ciclo for (int m = 0; m < PyKDoG.size(); ++m){ float * pkDoG_D; float * out_D; //float * out= new float[sizeImage]; int sizeMask=PyKDoG[m].rows*PyKDoG[m].cols; //////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Reservo Memoria GPU cudaMalloc(&pkDoG_D,sizeof(float)*sizeMask);//mascaras //cout<<cudaGetErrorString(e)<<" cudaMalloc________Mask "<<endl; cudaMalloc(&out_D,sizeof(float)*sizeImage); //cout<<cudaGetErrorString(e)<<" cudaMalloc________Mask"<<endl; //////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Copio Memoria GPU cudaMemcpy(pkDoG_D,PyKDoG[m].ptr<float>(),sizeof(float)*sizeMask,cudaMemcpyHostToDevice); //cout<<cudaGetErrorString(e)<<" cudaMemCopyHD________Mask"<<endl; //////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Lanzo Kernel Convolution<<<imgBlocks,1024>>>(img_D,pkDoG_D,pyDoG,PyKDoG[m].rows,PyKDoG[m].cols,images[i].rows,images[i].cols,out_D,idxPyDoG); //cudaDeviceSynchronize(); ++idxPyDoG; cudaFree(pkDoG_D); //cudaMemcpy(out,out_D,sizeof(float)*sizeImage,cudaMemcpyDeviceToHost); //cout<<cudaGetErrorString(e)<<" cudaMemCopyDH________Mask"<<endl; //Mat image_out(images[i].rows,images[i].cols,CV_32F,out); //cout<<image_out<<endl; //imshow("PyDoG",image_out); //waitKey(0); //destroyAllWindows(); //delete(out); //cudaFree(out_D); } cudaFree(img_D); //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// } int maskC =PyKDoG[0].cols; int idxmM=0; for (int i = 0; i <images.size() ; ++i) { int sizeImage = images[i].rows*images[i].cols; int imgBlocks= ceil((double) images[i].cols/BW); //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////Busqueda de MinMax /////////////////////////////////////////////////////////////////////Una Octava or ciclo int m=0; for(m = mMidx; m < mMidx+intvls; ++m){ float * out_D; //float * out = new float[sizeImage]; //////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Reservo Memoria GPU cudaMalloc(&out_D,sizeof(float)*sizeImage); //cout<<cudaGetErrorString(e)<<" cudaMalloc________Mask"<<endl; 
//////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Lanzo Kernel ///////entrega ya los puntos descartanbdo los de bajo contraste LocateMaxMin<<<imgBlocks,1024>>>(pyDoG,m,out_D,minMax,maskC,images[i].rows,images[i].cols,idxmM); ++idxmM; //cudaDeviceSynchronize(); //cudaMemcpy(out,out_D,sizeof(float)*sizeImage,cudaMemcpyDeviceToHost); //cout<<cudaGetErrorString(e)<<" cudaMemCopyDH________Mask"<<endl; //Mat image_out(images[i].rows,images[i].cols,CV_32F,out); //imshow("MinMax",image_out); //waitKey(0); //destroyAllWindows(); //delete(out); } mMidx=m+2; //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// //////////////////////////////////////////////////////////////////////////////////////// } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Remover outliers idxPyDoG=1, idxmM=0; for(int i = 0; i< images.size(); ++i ) { float* out_D; int sizeImage = images[i].rows*images[i].cols; int imgBlocks= ceil((double) images[i].cols/BW); cudaMalloc(&out_D,sizeof(float)*sizeImage); //float * out = new float[sizeImage]; for (int j = 0; j < intvls; ++j) { RemoveOutlier<<<imgBlocks,1024>>>(pyDoG,minMax,idxmM,idxPyDoG, images[i].rows,images[i].cols,out_D); //cudaMemcpy(out,out_D,sizeof(float)*sizeImage,cudaMemcpyDeviceToHost); //Mat image_out(images[i].rows,images[i].cols,CV_32F,out); //imshow("MinMax Filtrados",image_out); //waitKey(0); //destroyAllWindows(); ++idxmM; ++idxPyDoG; } idxPyDoG+=2; //delete(out); cudaFree(out_D); } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Calculo de Orientaciones y magnitud en DoG ArrayImage * Mag; ArrayImage * Ori; cudaMalloc(&Mag,sizeof(ArrayImage)*intvls*images.size()); cudaMalloc(&Ori,sizeof(ArrayImage)*intvls*images.size()); idxPyDoG=1; int idxMagOri=0; for(int i = 0; i< images.size(); ++i ) { float * MagAux; float * OriAux; int sizeImage = images[i].rows*images[i].cols; int imgBlocks= ceil((double) images[i].cols/BW); cudaMalloc(&MagAux,sizeof(float)*sizeImage); cudaMalloc(&OriAux,sizeof(float)*sizeImage); //float * out = new float[sizeImage]; for (int j = 0; j < intvls; ++j) { OriMag<<<imgBlocks,1024>>>(pyDoG,idxPyDoG, images[i].rows,images[i].cols,Mag,Ori,idxMagOri,MagAux,OriAux); //cudaMemcpy(out,OriAux,sizeof(float)*sizeImage,cudaMemcpyDeviceToHost); //Mat image_out(images[i].rows,images[i].cols,CV_32F,out); //imshow("tesuto",image_out); //waitKey(0); //destroyAllWindows(); ++idxMagOri; ++idxPyDoG; } idxPyDoG+=2; //delete(out); //cudaFree(MagAux); //cudaFree(OriAux); } /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////////////////////////////////// /////////////////////////////////////////////////////////////////////Obtener orientacion de keypoints vector<KeyPoint> KPoints; idxmM=0; for(int i = 0; i< images.size(); ++i ) { float sigma=sqrt(2.0f); int imgBlocks= ceil((double) 
images[i].cols/BW); keyPoint * KP; //keyPoint * KP_host = new keyPoint[images[i].rows*images[i].cols]; cudaMalloc(&KP,sizeof(keyPoint)*images[i].rows*images[i].cols); for (int j = 0; j < intvls; ++j) { KeyPoints<<<imgBlocks,128>>>(Mag, Ori, minMax , idxmM, KP, sigma, images[i].rows,images[i].cols, i ); // cudaMemcpy(KP_host,KP,sizeof(keyPoint)*images[i].rows*images[i].cols,cudaMemcpyDeviceToHost); sigma= pow(2.0,1.0/ intvls ) * sigma; ++idxmM; /* for(int k=0; k<(images[i].rows*images[i].cols); ++k){ if( !(KP_host[k].octv <0) ){ //cout<<idxmM<<endl; if (i>0) { KP_host[k].x*=pow(2,i); KP_host[k].y*=pow(2,i); } KeyPoint aux(KP_host[k].x,KP_host[k].y,KP_host[k].size,KP_host[k].orientacion ,0,KP_host[k].octv); //cout<<KP_host[k].size<<endl; KPoints.push_back(aux); } }*/ } //delete(KP_host); cudaFree(KP); } cudaEventRecord(stop, 0); cudaEventSynchronize(stop); float elapsedTime; cudaEventElapsedTime(&elapsedTime,start, stop); cout<< "Tiempo total "<<elapsedTime << " en milseg"<<endl; cudaEventDestroy(start); cudaEventDestroy(stop); //cout<<KPoints.size()<<endl; /* Mat out; drawKeypoints(I,KPoints,out); imshow("Puntos Caracteristicos SIFT",out); waitKey(0); destroyAllWindows(); /*Ptr<DescriptorExtractor> featureExtractor = DescriptorExtractor::create("SIFT"); Mat descriptors; featureExtractor->compute(I, KPoints, descriptors); Mat outputImage; Scalar keypointColor = Scalar(255, 0, 0); drawKeypoints(I, KPoints, outputImage, keypointColor, DrawMatchesFlags::DEFAULT); imshow("test",outputImage); waitKey(0); destroyAllWindows();*/ cudaFree(Ori); cudaFree(Mag); cudaFree(pyDoG); cudaFree(minMax); return 0; }
d814c3e71abaeb3292f31316d8d2beb3c752ba0d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

__global__ void stochasticGradientDescentKernel (
    int numberIterations,
    int* parameterIndices,
    int* counts,
    int dimension,
    float* parameters,
    float* gradient,
    float learningRate) {

    int updateIndex = blockIdx.x;
    int parameterIndex = parameterIndices[updateIndex];
    int count = counts[updateIndex];

    if(parameterIndex != -1 && count > 0) {
        float scalingFactor = 1.0 / (float)count;

        int startEntryIndex = (blockIdx.y * blockDim.x + threadIdx.x) * numberIterations;

        int firstParameterEntryIndex = parameterIndex * dimension;
        int startParameterEntryIndex = firstParameterEntryIndex + startEntryIndex;
        int startGradientEntryIndex = updateIndex * dimension + startEntryIndex;

        int exclusiveEndParameterEntryIndex =
            min(startParameterEntryIndex + numberIterations, firstParameterEntryIndex + dimension);

        int parameterEntryIndex = startParameterEntryIndex;
        int gradientEntryIndex = startGradientEntryIndex;

        while(parameterEntryIndex < exclusiveEndParameterEntryIndex) {
            float scaledDerivative = scalingFactor * gradient[gradientEntryIndex];
            parameters[parameterEntryIndex] -= learningRate * scaledDerivative;

            parameterEntryIndex++;
            gradientEntryIndex++;
        }
    }
}
d814c3e71abaeb3292f31316d8d2beb3c752ba0d.cu
__global__ void stochasticGradientDescentKernel (
    int numberIterations,
    int* parameterIndices,
    int* counts,
    int dimension,
    float* parameters,
    float* gradient,
    float learningRate) {

    int updateIndex = blockIdx.x;
    int parameterIndex = parameterIndices[updateIndex];
    int count = counts[updateIndex];

    if(parameterIndex != -1 && count > 0) {
        float scalingFactor = 1.0 / (float)count;

        int startEntryIndex = (blockIdx.y * blockDim.x + threadIdx.x) * numberIterations;

        int firstParameterEntryIndex = parameterIndex * dimension;
        int startParameterEntryIndex = firstParameterEntryIndex + startEntryIndex;
        int startGradientEntryIndex = updateIndex * dimension + startEntryIndex;

        int exclusiveEndParameterEntryIndex =
            min(startParameterEntryIndex + numberIterations, firstParameterEntryIndex + dimension);

        int parameterEntryIndex = startParameterEntryIndex;
        int gradientEntryIndex = startGradientEntryIndex;

        while(parameterEntryIndex < exclusiveEndParameterEntryIndex) {
            float scaledDerivative = scalingFactor * gradient[gradientEntryIndex];
            parameters[parameterEntryIndex] -= learningRate * scaledDerivative;

            parameterEntryIndex++;
            gradientEntryIndex++;
        }
    }
}
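The kernel above maps blockIdx.x to one sparse update, and each thread walks numberIterations consecutive entries of that parameter row, with blockIdx.y supplying extra blocks when one block cannot cover the whole dimension. The host-side sketch below illustrates that launch geometry; the sizes and the wrapper function name are invented, and the kernel is assumed to be linked from the file above.

// Illustrative launch geometry only; all sizes are made up.
#include <cuda_runtime.h>

__global__ void stochasticGradientDescentKernel(
    int numberIterations, int* parameterIndices, int* counts, int dimension,
    float* parameters, float* gradient, float learningRate);

void launchSgdUpdate(int numberUpdates, int dimension,
                     int* devParameterIndices, int* devCounts,
                     float* devParameters, float* devGradient,
                     float learningRate) {
    const int threadsPerBlock = 128;   // illustrative
    const int numberIterations = 4;    // entries handled per thread

    // Each (blockIdx.y, threadIdx.x) pair covers numberIterations entries, so
    // blocksPerParameter * threadsPerBlock * numberIterations >= dimension.
    int entriesPerBlock = threadsPerBlock * numberIterations;
    int blocksPerParameter = (dimension + entriesPerBlock - 1) / entriesPerBlock;

    // blockIdx.x selects the update, blockIdx.y the chunk of the parameter row.
    dim3 grid(numberUpdates, blocksPerParameter);
    stochasticGradientDescentKernel<<<grid, threadsPerBlock>>>(
        numberIterations, devParameterIndices, devCounts, dimension,
        devParameters, devGradient, learningRate);
}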
8cc42a85712c70c39ab9d3cb26552c6adeefbc3d.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "device_launch_parameters.h"
#include <stdlib.h>
#include <math.h>
#include <stdio.h>
#include "kernel.h"

// c = A * b, where A is n x p (row-major) and b has p entries.
__global__ void mulKernel(double* c, double* a, double* b, int n, int p)
{
    int row = threadIdx.x + blockDim.x * blockIdx.x;
    if (row >= n) return;
    double temp = 0;
    for (int i = 0; i < p; i++) {
        temp += a[row * p + i] * b[i];
    }
    c[row] = temp;
}

// c += alpha * A^T * b, where A is n x p and b has n entries.
__global__ void mulTransposeKernel(double* c, double* a, double* b, int n, int p, double alpha = 1.0)
{
    int col = threadIdx.x + blockDim.x * blockIdx.x;
    if (col >= p) return;  // one thread per column of A (the original guard compared against n)
    double temp = 0;
    for (int i = 0; i < n; i++) {
        temp += a[i * p + col] * b[i];
    }
    c[col] += temp * alpha;
}

__global__ void minusKernel(double* c, double* a, double* b, int n)
{
    int index = threadIdx.x + blockDim.x * blockIdx.x;
    if (index >= n) return;
    c[index] = a[index] - b[index];
}

__global__ void arrayXarray(double* c, double* a, double* b, int n)
{
    int index = threadIdx.x + blockDim.x * blockIdx.x;
    if (index >= n) return;
    c[index] = a[index] * b[index];
}

void tink4(double* theta, double* train_x, double* train_y, int n, int p, int blocksize)
{
    double alpha = 0.0083;
    double *err_0, *dev_train_x, *dev_train_y, *dev_y_pred, *dev_theta, *dev_err, *dev_err2;

    for (int i = 0; i < p; i++)
        theta[i] = (rand() % 10) / 1000.0;

    hipMalloc((void**)&dev_theta, p * sizeof(double));
    hipMalloc((void**)&dev_train_x, n * p * sizeof(double));
    hipMalloc((void**)&dev_train_y, n * sizeof(double));
    hipMalloc((void**)&dev_y_pred, n * sizeof(double));
    hipMalloc((void**)&dev_err, n * sizeof(double));
    hipMalloc((void**)&dev_err2, n * sizeof(double));

    hipMemcpy(dev_train_x, train_x, p * n * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(dev_train_y, train_y, n * sizeof(double), hipMemcpyHostToDevice);
    hipMemcpy(dev_theta, theta, p * sizeof(double), hipMemcpyHostToDevice);

    err_0 = (double*)malloc(n * sizeof(double));

    int cnt = 2000000, it = 0;
    double last_error, error = 100;
    // Round the grid size up so every element gets a thread
    // (plain n / blocksize is integer division and truncates).
    int blocks = (n + blocksize - 1) / blocksize;

    do {
        last_error = error;
        double error2 = 0;

        mulKernel<<<blocks, blocksize>>>(dev_y_pred, dev_train_x, dev_theta, n, p);
        hipDeviceSynchronize();
        minusKernel<<<blocks, blocksize>>>(dev_err, dev_y_pred, dev_train_y, n);
        hipDeviceSynchronize();
        arrayXarray<<<blocks, blocksize>>>(dev_err2, dev_err, dev_err, n);
        hipDeviceSynchronize();
        // One thread per coefficient (assumes p fits in a single block).
        mulTransposeKernel<<<1, p>>>(dev_theta, dev_train_x, dev_err, n, p, -alpha / n);
        hipDeviceSynchronize();

        hipMemcpy(err_0, dev_err2, n * sizeof(double), hipMemcpyDeviceToHost);
        for (int i = 0; i < n; i++)
            error2 += err_0[i];
        error = sqrt(error2);
        if (error > last_error)
            alpha /= 1.2;
        hipDeviceSynchronize();
    } while (it++ < cnt && fabs(error - last_error) > epsilon);

    printf("in %d iterations \n", it);
    hipMemcpy(theta, dev_theta, p * sizeof(double), hipMemcpyDeviceToHost);

    free(err_0);
    hipFree(dev_err);
    hipFree(dev_err2);
    hipFree(dev_train_x);
    hipFree(dev_train_y);
    hipFree(dev_y_pred);
    hipFree(dev_theta);
}
8cc42a85712c70c39ab9d3cb26552c6adeefbc3d.cu
 #include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdlib.h> #include <math.h> #include <stdio.h> #include "kernel.h" __global__ void mulKernel(double* c, double* a, double* b,int n,int p) { int row = threadIdx.x+blockDim.x*blockIdx.x; if (row >= n) return; double temp = 0; for (int i = 0; i < p; i++) { temp += a[row * p + i] * b[i]; } c[row] = temp; } __global__ void mulTransposeKernel(double* c, double* a, double* b,int n,int p,double alpha=1.0) { int col = threadIdx.x + blockDim.x * blockIdx.x; if (col >= n) return; double temp = 0; for (int i = 0; i < n; i++) { temp += a[i * p + col] * b[i]; } c[col] += temp*alpha; } __global__ void minusKernel(double* c, double* a, double* b,int n) { int index = threadIdx.x + blockDim.x * blockIdx.x; if (index >= n) return; c[index] = a[index] - b[index]; } __global__ void arrayXarray(double* c, double* a, double* b,int n) { int index = threadIdx.x + blockDim.x * blockIdx.x; if (index >= n) return; c[index] = a[index] * b[index]; } void tink4(double* theta,double* train_x,double* train_y,int n,int p,int blocksize) { double alpha = 0.0083; double *err_0,*y_pred,*dev_train_x,*dev_train_y,*dev_y_pred, *dev_theta,*dev_err,*dev_err2; for (int i = 0; i < p; i++) theta[i] = (rand() % 10) / 1000.0; cudaMalloc((void**)& dev_theta,p * sizeof(double)); cudaMalloc((void**)& dev_train_x, n*p * sizeof(double)); cudaMalloc((void**)& dev_train_y, n* sizeof(double)); cudaMalloc((void**)& dev_y_pred, n* sizeof(double)); cudaMalloc((void**)& dev_err, n* sizeof(double)); cudaMalloc((void**)& dev_err2, n* sizeof(double)); cudaMemcpy(dev_train_x, train_x, p * n * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_train_y, train_y, n * sizeof(double), cudaMemcpyHostToDevice); cudaMemcpy(dev_theta, theta, p * sizeof(double), cudaMemcpyHostToDevice); err_0 = (double*)malloc(n * sizeof(double)); int cnt = 2000000, it = 0; double last_error, error = 100; do { last_error = error; double error2 = 0; mulKernel <<< ceil(n/blocksize),blocksize >> > (dev_y_pred, dev_train_x, dev_theta, n,p); cudaDeviceSynchronize(); minusKernel << <ceil(n/blocksize),blocksize>> > (dev_err, dev_y_pred, dev_train_y,n); cudaDeviceSynchronize(); arrayXarray << <ceil(n/blocksize),blocksize>> > (dev_err2, dev_err, dev_err,n); cudaDeviceSynchronize(); mulTransposeKernel << <1,p >> > (dev_theta, dev_train_x, dev_err,n,p,-alpha/n); cudaDeviceSynchronize(); cudaMemcpy(err_0, dev_err2, n * sizeof(double), cudaMemcpyDeviceToHost); for (int i = 0; i < n; i++) error2 += err_0[i]; error = sqrt(error2); if (error > last_error) alpha /= 1.2; cudaDeviceSynchronize(); } while (it++ < cnt && fabs(error - last_error) > epsilon); printf("in %d iterations \n", it); cudaMemcpy(theta, dev_theta, p * sizeof(double), cudaMemcpyDeviceToHost); cudaFree(dev_err); cudaFree(dev_train_x); cudaFree(dev_train_y); cudaFree(dev_y_pred); cudaFree(dev_theta); }
b970b661c4f7ebe6280fe1c0ef209fa00720b863.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include "../inc/pricingengine.h" #include <string> #include <vector> #include <numeric> #include <stdexcept> #include <typeinfo> #include <hip/hip_runtime_api.h> #include <hiprand/hiprand_kernel.h> #include "../inc/asianoption.h" #include "../../inc/cudasharedmem.h" using std::string; using std::vector; // RNG init kernel __global__ void initRNG(hiprandState_t * const rngStates, const unsigned int seed) { // Determine thread ID unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; // Initialise the RNG hiprand_init(seed, tid, 0, &rngStates[tid]); } __device__ inline float getPathStep(float &drift, float &diffusion, hiprandState_t &state) { return expf(drift + diffusion * hiprand_normal(&state)); } __device__ inline double getPathStep(double &drift, double &diffusion, hiprandState_t &state) { return exp(drift + diffusion * hiprand_normal_double(&state)); } // Path generation kernel template <typename Real> __global__ void generatePaths(Real * const paths, hiprandState_t * const rngStates, const AsianOption<Real> * const option, const unsigned int numSims, const unsigned int numTimesteps) { // Determine thread ID unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int step = gridDim.x * blockDim.x; // Compute parameters Real drift = (option->r - static_cast<Real>(0.5) * option->sigma * option->sigma) * option->dt; Real diffusion = option->sigma * sqrt(option->dt); // Initialise the RNG hiprandState_t localState = rngStates[tid]; for (unsigned int i = tid ; i < numSims ; i += step) { // Shift the output pointer Real *output = paths + i; // Simulate the path Real s = static_cast<Real>(1); for (unsigned int t = 0 ; t < numTimesteps ; t++, output += numSims) { s *= getPathStep(drift, diffusion, localState); *output = s; } } } template <typename Real> __device__ Real reduce_sum(Real in) { SharedMemory<Real> sdata; // Perform first level of reduction: // - Write to shared memory unsigned int ltid = threadIdx.x; sdata[ltid] = in; __syncthreads(); // Do reduction in shared mem for (unsigned int s = blockDim.x / 2 ; s > 0 ; s >>= 1) { if (ltid < s) { sdata[ltid] += sdata[ltid + s]; } __syncthreads(); } return sdata[0]; } // Valuation kernel template <typename Real> __global__ void computeValue(Real * const values, const Real * const paths, const AsianOption<Real> * const option, const unsigned int numSims, const unsigned int numTimesteps) { // Determine thread ID unsigned int bid = blockIdx.x; unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int step = gridDim.x * blockDim.x; Real sumPayoffs = static_cast<Real>(0); for (unsigned int i = tid ; i < numSims ; i += step) { // Shift the input pointer const Real *path = paths + i; // Compute the arithmetic average Real avg = static_cast<Real>(0); for (unsigned int t = 0 ; t < numTimesteps ; t++, path += numSims) { avg += *path; } avg = avg * option->spot / numTimesteps; // Compute the payoff Real payoff = avg - option->strike; if (option->type == AsianOption<Real>::Put) { payoff = - payoff; } payoff = max(static_cast<Real>(0), payoff); // Accumulate payoff locally sumPayoffs += 
payoff; } // Reduce within the block sumPayoffs = reduce_sum<Real>(sumPayoffs); // Store the result if (threadIdx.x == 0) values[bid] = sumPayoffs; } template <typename Real> PricingEngine<Real>::PricingEngine(unsigned int numSims, unsigned int device, unsigned int threadBlockSize, unsigned int seed) : m_numSims(numSims), m_device(device), m_threadBlockSize(threadBlockSize), m_seed(seed) { } template <typename Real> void PricingEngine<Real>::operator()(AsianOption<Real> &option) { hipError_t cudaResult = hipSuccess; struct hipDeviceProp_t deviceProperties; struct hipFuncAttributes funcAttributes; // Get device properties cudaResult = hipGetDeviceProperties(&deviceProperties, m_device); if (cudaResult != hipSuccess) { string msg("Could not get device properties: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } // Check precision is valid unsigned int deviceVersion = deviceProperties.major * 10 + deviceProperties.minor; if (typeid(Real) == typeid(double) && deviceVersion < 13) { throw std::runtime_error("Device does not have double precision support"); } // Attach to GPU cudaResult = hipSetDevice(m_device); if (cudaResult != hipSuccess) { string msg("Could not set CUDA device: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } // Determine how to divide the work between cores dim3 block; dim3 grid; block.x = m_threadBlockSize; grid.x = (m_numSims + m_threadBlockSize - 1) / m_threadBlockSize; // Aim to launch around ten or more times as many blocks as there // are multiprocessors on the target device. unsigned int blocksPerSM = 10; unsigned int numSMs = deviceProperties.multiProcessorCount; while (grid.x > 2 * blocksPerSM * numSMs) grid.x >>= 1; // Get initRNG function properties and check the maximum block size cudaResult = hipFuncGetAttributes(&funcAttributes, initRNG); if (cudaResult != hipSuccess) { string msg("Could not get function attributes: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } if (block.x > (unsigned int)funcAttributes.maxThreadsPerBlock) { throw std::runtime_error("Block X dimension is too large for initRNG kernel"); } // Get generatePaths function properties and check the maximum block size cudaResult = hipFuncGetAttributes(&funcAttributes, generatePaths<Real>); if (cudaResult != hipSuccess) { string msg("Could not get function attributes: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } if (block.x > (unsigned int)funcAttributes.maxThreadsPerBlock) { throw std::runtime_error("Block X dimension is too large for generatePaths kernel"); } // Get computeValue function properties and check the maximum block size cudaResult = hipFuncGetAttributes(&funcAttributes, computeValue<Real>); if (cudaResult != hipSuccess) { string msg("Could not get function attributes: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } if (block.x > (unsigned int)funcAttributes.maxThreadsPerBlock) { throw std::runtime_error("Block X dimension is too large for computeValue kernel"); } // Setup problem on GPU AsianOption<Real> *d_option = 0; cudaResult = hipMalloc((void **)&d_option, sizeof(AsianOption<Real>)); if (cudaResult != hipSuccess) { string msg("Could not allocate memory on device for option data: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } cudaResult = hipMemcpy(d_option, &option, sizeof(AsianOption<Real>), hipMemcpyHostToDevice); if (cudaResult != hipSuccess) { string msg("Could not copy data to device: "); msg += 
hipGetErrorString(cudaResult); throw std::runtime_error(msg); } // Allocate memory for paths Real *d_paths = 0; int numTimesteps = static_cast<int>(option.tenor / option.dt); cudaResult = hipMalloc((void **)&d_paths, m_numSims * numTimesteps * sizeof(Real)); if (cudaResult != hipSuccess) { string msg("Could not allocate memory on device for paths: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } // Allocate memory for RNG states hiprandState_t *d_rngStates = 0; cudaResult = hipMalloc((void **)&d_rngStates, grid.x * block.x * sizeof(hiprandState_t)); if (cudaResult != hipSuccess) { string msg("Could not allocate memory on device for RNG state: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } // Allocate memory for result Real *d_values = 0; cudaResult = hipMalloc((void **)&d_values, grid.x * sizeof(Real)); if (cudaResult != hipSuccess) { string msg("Could not allocate memory on device for partial results: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } // Initialise RNG hipLaunchKernelGGL(( initRNG), dim3(grid), dim3(block), 0, 0, d_rngStates, m_seed); // Generate paths hipLaunchKernelGGL(( generatePaths<Real>), dim3(grid), dim3(block), 0, 0, d_paths, d_rngStates, d_option, m_numSims, numTimesteps); // Compute value hipLaunchKernelGGL(( computeValue), dim3(grid), dim3(block), block.x * sizeof(Real), 0, d_values, d_paths, d_option, m_numSims, numTimesteps); // Copy partial results back vector<Real> values(grid.x); cudaResult = hipMemcpy(&values[0], d_values, grid.x * sizeof(Real), hipMemcpyDeviceToHost); if (cudaResult != hipSuccess) { string msg("Could not copy partial results to host: "); msg += hipGetErrorString(cudaResult); throw std::runtime_error(msg); } // Complete sum-reduction on host option.value = std::accumulate(values.begin(), values.end(), static_cast<Real>(0)); // Compute the mean option.value /= m_numSims; // Discount to present value option.value *= exp(- option.r * option.tenor); // Cleanup if (d_option) { hipFree(d_option); d_option = 0; } if (d_paths) { hipFree(d_paths); d_paths = 0; } if (d_rngStates) { hipFree(d_rngStates); d_rngStates = 0; } if (d_values) { hipFree(d_values); d_values = 0; } } // Explicit template instantiation template class PricingEngine<float>; template class PricingEngine<double>;
b970b661c4f7ebe6280fe1c0ef209fa00720b863.cu
/* * Copyright 1993-2010 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ #include "../inc/pricingengine.h" #include <string> #include <vector> #include <numeric> #include <stdexcept> #include <typeinfo> #include <cuda_runtime_api.h> #include <curand_kernel.h> #include "../inc/asianoption.h" #include "../../inc/cudasharedmem.h" using std::string; using std::vector; // RNG init kernel __global__ void initRNG(curandState * const rngStates, const unsigned int seed) { // Determine thread ID unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; // Initialise the RNG curand_init(seed, tid, 0, &rngStates[tid]); } __device__ inline float getPathStep(float &drift, float &diffusion, curandState &state) { return expf(drift + diffusion * curand_normal(&state)); } __device__ inline double getPathStep(double &drift, double &diffusion, curandState &state) { return exp(drift + diffusion * curand_normal_double(&state)); } // Path generation kernel template <typename Real> __global__ void generatePaths(Real * const paths, curandState * const rngStates, const AsianOption<Real> * const option, const unsigned int numSims, const unsigned int numTimesteps) { // Determine thread ID unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int step = gridDim.x * blockDim.x; // Compute parameters Real drift = (option->r - static_cast<Real>(0.5) * option->sigma * option->sigma) * option->dt; Real diffusion = option->sigma * sqrt(option->dt); // Initialise the RNG curandState localState = rngStates[tid]; for (unsigned int i = tid ; i < numSims ; i += step) { // Shift the output pointer Real *output = paths + i; // Simulate the path Real s = static_cast<Real>(1); for (unsigned int t = 0 ; t < numTimesteps ; t++, output += numSims) { s *= getPathStep(drift, diffusion, localState); *output = s; } } } template <typename Real> __device__ Real reduce_sum(Real in) { SharedMemory<Real> sdata; // Perform first level of reduction: // - Write to shared memory unsigned int ltid = threadIdx.x; sdata[ltid] = in; __syncthreads(); // Do reduction in shared mem for (unsigned int s = blockDim.x / 2 ; s > 0 ; s >>= 1) { if (ltid < s) { sdata[ltid] += sdata[ltid + s]; } __syncthreads(); } return sdata[0]; } // Valuation kernel template <typename Real> __global__ void computeValue(Real * const values, const Real * const paths, const AsianOption<Real> * const option, const unsigned int numSims, const unsigned int numTimesteps) { // Determine thread ID unsigned int bid = blockIdx.x; unsigned int tid = blockIdx.x * blockDim.x + threadIdx.x; unsigned int step = gridDim.x * blockDim.x; Real sumPayoffs = static_cast<Real>(0); for (unsigned int i = tid ; i < numSims ; i += step) { // Shift the input pointer const Real *path = paths + i; // Compute the arithmetic average Real avg = static_cast<Real>(0); for (unsigned int t = 0 ; t < numTimesteps ; t++, path += numSims) { avg += *path; } avg = avg * option->spot / numTimesteps; // Compute the payoff Real payoff = avg - option->strike; if (option->type == AsianOption<Real>::Put) { payoff = - payoff; } payoff = max(static_cast<Real>(0), payoff); // Accumulate payoff locally sumPayoffs += payoff; } // Reduce within the block sumPayoffs = reduce_sum<Real>(sumPayoffs); // Store 
the result if (threadIdx.x == 0) values[bid] = sumPayoffs; } template <typename Real> PricingEngine<Real>::PricingEngine(unsigned int numSims, unsigned int device, unsigned int threadBlockSize, unsigned int seed) : m_numSims(numSims), m_device(device), m_threadBlockSize(threadBlockSize), m_seed(seed) { } template <typename Real> void PricingEngine<Real>::operator()(AsianOption<Real> &option) { cudaError_t cudaResult = cudaSuccess; struct cudaDeviceProp deviceProperties; struct cudaFuncAttributes funcAttributes; // Get device properties cudaResult = cudaGetDeviceProperties(&deviceProperties, m_device); if (cudaResult != cudaSuccess) { string msg("Could not get device properties: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } // Check precision is valid unsigned int deviceVersion = deviceProperties.major * 10 + deviceProperties.minor; if (typeid(Real) == typeid(double) && deviceVersion < 13) { throw std::runtime_error("Device does not have double precision support"); } // Attach to GPU cudaResult = cudaSetDevice(m_device); if (cudaResult != cudaSuccess) { string msg("Could not set CUDA device: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } // Determine how to divide the work between cores dim3 block; dim3 grid; block.x = m_threadBlockSize; grid.x = (m_numSims + m_threadBlockSize - 1) / m_threadBlockSize; // Aim to launch around ten or more times as many blocks as there // are multiprocessors on the target device. unsigned int blocksPerSM = 10; unsigned int numSMs = deviceProperties.multiProcessorCount; while (grid.x > 2 * blocksPerSM * numSMs) grid.x >>= 1; // Get initRNG function properties and check the maximum block size cudaResult = cudaFuncGetAttributes(&funcAttributes, initRNG); if (cudaResult != cudaSuccess) { string msg("Could not get function attributes: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } if (block.x > (unsigned int)funcAttributes.maxThreadsPerBlock) { throw std::runtime_error("Block X dimension is too large for initRNG kernel"); } // Get generatePaths function properties and check the maximum block size cudaResult = cudaFuncGetAttributes(&funcAttributes, generatePaths<Real>); if (cudaResult != cudaSuccess) { string msg("Could not get function attributes: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } if (block.x > (unsigned int)funcAttributes.maxThreadsPerBlock) { throw std::runtime_error("Block X dimension is too large for generatePaths kernel"); } // Get computeValue function properties and check the maximum block size cudaResult = cudaFuncGetAttributes(&funcAttributes, computeValue<Real>); if (cudaResult != cudaSuccess) { string msg("Could not get function attributes: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } if (block.x > (unsigned int)funcAttributes.maxThreadsPerBlock) { throw std::runtime_error("Block X dimension is too large for computeValue kernel"); } // Setup problem on GPU AsianOption<Real> *d_option = 0; cudaResult = cudaMalloc((void **)&d_option, sizeof(AsianOption<Real>)); if (cudaResult != cudaSuccess) { string msg("Could not allocate memory on device for option data: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } cudaResult = cudaMemcpy(d_option, &option, sizeof(AsianOption<Real>), cudaMemcpyHostToDevice); if (cudaResult != cudaSuccess) { string msg("Could not copy data to device: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } // Allocate memory for 
paths Real *d_paths = 0; int numTimesteps = static_cast<int>(option.tenor / option.dt); cudaResult = cudaMalloc((void **)&d_paths, m_numSims * numTimesteps * sizeof(Real)); if (cudaResult != cudaSuccess) { string msg("Could not allocate memory on device for paths: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } // Allocate memory for RNG states curandState *d_rngStates = 0; cudaResult = cudaMalloc((void **)&d_rngStates, grid.x * block.x * sizeof(curandState)); if (cudaResult != cudaSuccess) { string msg("Could not allocate memory on device for RNG state: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } // Allocate memory for result Real *d_values = 0; cudaResult = cudaMalloc((void **)&d_values, grid.x * sizeof(Real)); if (cudaResult != cudaSuccess) { string msg("Could not allocate memory on device for partial results: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } // Initialise RNG initRNG<<<grid, block>>>(d_rngStates, m_seed); // Generate paths generatePaths<Real><<<grid, block>>>(d_paths, d_rngStates, d_option, m_numSims, numTimesteps); // Compute value computeValue<<<grid, block, block.x * sizeof(Real)>>>(d_values, d_paths, d_option, m_numSims, numTimesteps); // Copy partial results back vector<Real> values(grid.x); cudaResult = cudaMemcpy(&values[0], d_values, grid.x * sizeof(Real), cudaMemcpyDeviceToHost); if (cudaResult != cudaSuccess) { string msg("Could not copy partial results to host: "); msg += cudaGetErrorString(cudaResult); throw std::runtime_error(msg); } // Complete sum-reduction on host option.value = std::accumulate(values.begin(), values.end(), static_cast<Real>(0)); // Compute the mean option.value /= m_numSims; // Discount to present value option.value *= exp(- option.r * option.tenor); // Cleanup if (d_option) { cudaFree(d_option); d_option = 0; } if (d_paths) { cudaFree(d_paths); d_paths = 0; } if (d_rngStates) { cudaFree(d_rngStates); d_rngStates = 0; } if (d_values) { cudaFree(d_values); d_values = 0; } } // Explicit template instantiation template class PricingEngine<float>; template class PricingEngine<double>;
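computeValue relies on a shared-memory block reduction (reduce_sum) followed by a host-side std::accumulate over the per-block partial sums. A self-contained sketch of that pattern, stripped of the option-pricing details, follows; the kernel name and sizes are illustrative, and blockDim.x is assumed to be a power of two, as the original reduction also requires.

#include <cuda_runtime.h>

// Sum `n` floats into one partial sum per block; the host finishes the sum.
__global__ void blockSum(const float* in, float* blockSums, int n) {
    extern __shared__ float sdata[];
    unsigned int tid = threadIdx.x;
    unsigned int i = blockIdx.x * blockDim.x + threadIdx.x;
    sdata[tid] = (i < n) ? in[i] : 0.0f;
    __syncthreads();
    for (unsigned int s = blockDim.x / 2; s > 0; s >>= 1) {  // power-of-two blockDim
        if (tid < s) sdata[tid] += sdata[tid + s];
        __syncthreads();
    }
    if (tid == 0) blockSums[blockIdx.x] = sdata[0];
}

// Launch with dynamic shared memory, mirroring computeValue's launch:
//   blockSum<<<numBlocks, 256, 256 * sizeof(float)>>>(d_in, d_blockSums, n);
// then copy d_blockSums back and finish with std::accumulate, as the pricing
// engine does with its per-block payoff sums.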
912c89b4f10ebbbb586f03c782dc2cd3db08026d.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Example showing the use of CUFFT for fast 1D-convolution using FFT. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <hip/hip_runtime.h> #include <hipfft.h> #include <hipfftXt.h> //#include <helper_functions.h> //#include <helper_cuda.h> #include "cu_stft.h" // Complex data type //////////////////////////////////////////////////////////////////////////////// void my_cufft(Complex *h_signal, int SIGNAL_SIZE) { // Allocate device memory for signal int mem_size = sizeof(Complex)*SIGNAL_SIZE; Complex *d_signal; hipMalloc((void **)&d_signal, mem_size); // Copy host memory to device hipMemcpy(d_signal, h_signal, mem_size, hipMemcpyHostToDevice); hipMemcpy(h_signal, d_signal, mem_size, hipMemcpyDeviceToHost); // CUFFT plan simple API hipfftHandle plan; hipfftPlan1d(&plan, SIGNAL_SIZE, HIPFFT_C2C, 1); // CUFFT plan advanced API hipfftExecC2C(plan, (hipfftComplex *)d_signal, (hipfftComplex *)d_signal, HIPFFT_FORWARD); hipMemcpy(h_signal, d_signal, mem_size, hipMemcpyDeviceToHost); //Destroy CUFFT context hipfftDestroy(plan); hipFree(d_signal); } Complex *my_stft(Complex *h_signal, int s_size, int w_lag, int s_len) { // Allocate device memory for signal //int mem_size = sizeof(Complex)*SIGNAL_SIZE; int x_size = (int)((s_size - s_len) / w_lag); int y_size = s_len; int stft_size = sizeof(Complex)*x_size*y_size; int mem_size = sizeof(Complex)*s_size; Complex *d_signal; hipMalloc((void **)&d_signal, mem_size); // Copy host memory to device hipMemcpy(d_signal, h_signal, mem_size, hipMemcpyHostToDevice); Complex *d_stft; hipMalloc((void **)&d_stft, stft_size); //hipMemcpy(h_signal, d_signal, mem_size, //hipMemcpyDeviceToHost); // CUFFT plan simple API hipfftHandle plan; hipfftPlan1d(&plan, s_len, HIPFFT_C2C, 1); // CUFFT plan advanced API for (int i = 0; i < x_size; i++) { hipfftExecC2C(plan, (hipfftComplex *)d_signal+i*w_lag, (hipfftComplex *)d_stft+i*y_size, HIPFFT_FORWARD); } Complex *h_stft; h_stft = (Complex *)malloc(stft_size); hipMemcpy(h_stft, d_stft, stft_size, hipMemcpyDeviceToHost); //Destroy CUFFT context hipfftDestroy(plan); hipFree(d_signal); hipFree(d_stft); return h_stft; }
912c89b4f10ebbbb586f03c782dc2cd3db08026d.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* * Copyright 1993-2014 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ /* Example showing the use of CUFFT for fast 1D-convolution using FFT. */ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project #include <cuda_runtime.h> #include <cufft.h> #include <cufftXt.h> //#include <helper_functions.h> //#include <helper_cuda.h> #include "cu_stft.h" // Complex data type //////////////////////////////////////////////////////////////////////////////// void my_cufft(Complex *h_signal, int SIGNAL_SIZE) { // Allocate device memory for signal int mem_size = sizeof(Complex)*SIGNAL_SIZE; Complex *d_signal; cudaMalloc((void **)&d_signal, mem_size); // Copy host memory to device cudaMemcpy(d_signal, h_signal, mem_size, cudaMemcpyHostToDevice); cudaMemcpy(h_signal, d_signal, mem_size, cudaMemcpyDeviceToHost); // CUFFT plan simple API cufftHandle plan; cufftPlan1d(&plan, SIGNAL_SIZE, CUFFT_C2C, 1); // CUFFT plan advanced API cufftExecC2C(plan, (cufftComplex *)d_signal, (cufftComplex *)d_signal, CUFFT_FORWARD); cudaMemcpy(h_signal, d_signal, mem_size, cudaMemcpyDeviceToHost); //Destroy CUFFT context cufftDestroy(plan); cudaFree(d_signal); } Complex *my_stft(Complex *h_signal, int s_size, int w_lag, int s_len) { // Allocate device memory for signal //int mem_size = sizeof(Complex)*SIGNAL_SIZE; int x_size = (int)((s_size - s_len) / w_lag); int y_size = s_len; int stft_size = sizeof(Complex)*x_size*y_size; int mem_size = sizeof(Complex)*s_size; Complex *d_signal; cudaMalloc((void **)&d_signal, mem_size); // Copy host memory to device cudaMemcpy(d_signal, h_signal, mem_size, cudaMemcpyHostToDevice); Complex *d_stft; cudaMalloc((void **)&d_stft, stft_size); //cudaMemcpy(h_signal, d_signal, mem_size, //cudaMemcpyDeviceToHost); // CUFFT plan simple API cufftHandle plan; cufftPlan1d(&plan, s_len, CUFFT_C2C, 1); // CUFFT plan advanced API for (int i = 0; i < x_size; i++) { cufftExecC2C(plan, (cufftComplex *)d_signal+i*w_lag, (cufftComplex *)d_stft+i*y_size, CUFFT_FORWARD); } Complex *h_stft; h_stft = (Complex *)malloc(stft_size); cudaMemcpy(h_stft, d_stft, stft_size, cudaMemcpyDeviceToHost); //Destroy CUFFT context cufftDestroy(plan); cudaFree(d_signal); cudaFree(d_stft); return h_stft; }
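my_stft allocates and returns the host-side spectrogram, so the caller owns both buffers. A hypothetical caller is sketched below; it assumes Complex is layout-compatible with cufftComplex (two floats, .x and .y), which the casts in the sample imply but which is actually defined in cu_stft.h. As a design note, the per-window loop over cufftExecC2C could likely be replaced by a single batched plan (cufftPlanMany with idist = w_lag), though that is not what the sample does.

#include <stdlib.h>
#include "cu_stft.h"

void run_stft_demo(void) {
    const int s_size = 4096, s_len = 256, w_lag = 64;   // illustrative sizes
    Complex *h_signal = (Complex *)malloc(sizeof(Complex) * s_size);
    for (int i = 0; i < s_size; i++) {                  // assumes .x/.y members
        h_signal[i].x = (float)i;
        h_signal[i].y = 0.0f;
    }
    // Returns (s_size - s_len) / w_lag windows of s_len bins each.
    Complex *h_stft = my_stft(h_signal, s_size, w_lag, s_len);
    /* ... consume the spectrogram ... */
    free(h_stft);    // caller owns the buffer malloc'd inside my_stft
    free(h_signal);
}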
327bd4068f43b970a5fb71e8ed8b87de88b8597a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * calcNormalFromSmoothedColorWCL.cu * * Created on: 02-09-2013 * Author: Kamil Szewc ([email protected]) */ #include "../../sph.h" #include "../../hlp.h" #include "../../methods/kernels.cuh" #include "../../methods/interactions.cuh" __device__ static real3 interaction(uint i, uint j, real3 dpos, real3 dvel, Particle *p, Parameters *par) { real q = sqrt(pow2(dpos.x) + pow2(dpos.y) + pow2(dpos.z)) * par->I_H; if ((q < 2.0) && ((i != j) || ((i == j) && (q > 0.001f*par->H)))) { real gkx = grad_of_kern(dpos.x, q, par->GKNORM); real gky = grad_of_kern(dpos.y, q, par->GKNORM); real gkz = grad_of_kern(dpos.z, q, par->GKNORM); real put = p[i].cs - p[j].cs; return MAKE_REAL3(p[j].m*put*gkx/p[j].d, p[j].m*put*gky/p[j].d, p[j].m*put*gkz/p[j].d); } else { return MAKE_REAL3(0.0, 0.0, 0.0); } } __global__ void calcNormalFromSmoothedColorWCL(Particle *p, uint *gridParticleIndex, uint *cellStart, uint *cellEnd, Parameters *par) { uint index = threadIdx.x + blockIdx.x*blockDim.x; if (index < par->N) { register real3 result = MAKE_REAL3(0.0, 0.0, 0.0); #include "../../methods/interactions/interactionsNegativeOnWallNoSlip.cuh" p[index].n.x = -result.x; p[index].n.y = -result.y; p[index].n.z = -result.z; p[index].n.w = sqrt(pow2(p[index].n.x) + pow2(p[index].n.y) + pow2(p[index].n.z)); } }
327bd4068f43b970a5fb71e8ed8b87de88b8597a.cu
/* * calcNormalFromSmoothedColorWCL.cu * * Created on: 02-09-2013 * Author: Kamil Szewc ([email protected]) */ #include "../../sph.h" #include "../../hlp.h" #include "../../methods/kernels.cuh" #include "../../methods/interactions.cuh" __device__ static real3 interaction(uint i, uint j, real3 dpos, real3 dvel, Particle *p, Parameters *par) { real q = sqrt(pow2(dpos.x) + pow2(dpos.y) + pow2(dpos.z)) * par->I_H; if ((q < 2.0) && ((i != j) || ((i == j) && (q > 0.001f*par->H)))) { real gkx = grad_of_kern(dpos.x, q, par->GKNORM); real gky = grad_of_kern(dpos.y, q, par->GKNORM); real gkz = grad_of_kern(dpos.z, q, par->GKNORM); real put = p[i].cs - p[j].cs; return MAKE_REAL3(p[j].m*put*gkx/p[j].d, p[j].m*put*gky/p[j].d, p[j].m*put*gkz/p[j].d); } else { return MAKE_REAL3(0.0, 0.0, 0.0); } } __global__ void calcNormalFromSmoothedColorWCL(Particle *p, uint *gridParticleIndex, uint *cellStart, uint *cellEnd, Parameters *par) { uint index = threadIdx.x + blockIdx.x*blockDim.x; if (index < par->N) { register real3 result = MAKE_REAL3(0.0, 0.0, 0.0); #include "../../methods/interactions/interactionsNegativeOnWallNoSlip.cuh" p[index].n.x = -result.x; p[index].n.y = -result.y; p[index].n.z = -result.z; p[index].n.w = sqrt(pow2(p[index].n.x) + pow2(p[index].n.y) + pow2(p[index].n.z)); } }
f7ad30a9be11f731550d63576598812bc17c656d.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // program name: cudaThreadBlock.cu // this program is designed for showing thread block example. // author: Shane Cook (Nvidia .Inc) // modified by Yang Yang @ Peking University July 2017 // // // Sketch diagram for thread blocks: // ----------------------------------------------------------------------------------- // | thread block 0 || thread block 0 || thread block 1 || thread block 1 || // | thread bundle 0 || thread bundle 1 || thread bundle 0 || thread bundle 1 || // | (thread 0~31) || (thread 32~63) || (thread 64~95) || (thread 96~127) || // ----------------------------------------------------------------------------------- // // Adress space: // ----------------------------------------------------------------------------------- // | || || || || // | adress || adress || adress || adress || // | (0~31) || (32~63) || (64~95) || (96~127) || // ----------------------------------------------------------------------------------- // head files #include <stdio.h> #include <stdlib.h> #include <conio.h> /* Cuda Kernel function: waht is my id */ __global__ void what_is_my_id(unsigned int * const block, unsigned int * const thread, unsigned int * const warp, unsigned int * const calc_thread) { /* Thread id equals to block index * block size + thread offset into the block */ const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x; block[thread_idx] = blockIdx.x; thread[thread_idx] = threadIdx.x; /* Calculate warp using built in variable warpSize */ warp[thread_idx] = threadIdx.x / warpSize; calc_thread[thread_idx] = thread_idx; } #define ARRAY_SIZE 128 #define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * ARRAY_SIZE) /* Declare statically four arrays of ARRAY_SIZE each */ unsigned int cpu_block[ARRAY_SIZE]; unsigned int cpu_thread[ARRAY_SIZE]; unsigned int cpu_warp[ARRAY_SIZE]; unsigned int cpu_calc_thread[ARRAY_SIZE]; int main(void) { /* Total thread count = 2 * 64 = 128 */ const unsigned int num_blocks = 2; const unsigned int num_threads = 64; char ch; /* Decalre pointers for GPU based params */ unsigned int * gpu_block; unsigned int * gpu_thread; unsigned int * gpu_warp; unsigned int * gpu_calc_thread; /* Declare loop counter for use later */ unsigned int i; /* Allocate four arrays on the GPU */ hipMalloc((void **)&gpu_block, ARRAY_SIZE_IN_BYTES); // Why here type is (void **)? 
hipMalloc((void **)&gpu_thread, ARRAY_SIZE_IN_BYTES); hipMalloc((void **)&gpu_warp, ARRAY_SIZE_IN_BYTES); hipMalloc((void **)&gpu_calc_thread, ARRAY_SIZE_IN_BYTES); /* Execute our cuda kernel */ hipLaunchKernelGGL(( what_is_my_id), dim3(num_blocks), dim3(num_threads), 0, 0, gpu_block, gpu_thread, gpu_warp, gpu_calc_thread); /* Copy back the gpu results to the CPU, from display RAM to RAM in physical*/ hipMemcpy(cpu_block, gpu_block, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost); hipMemcpy(cpu_thread, gpu_thread, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost); hipMemcpy(cpu_warp, gpu_warp, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost); hipMemcpy(cpu_calc_thread, gpu_calc_thread, ARRAY_SIZE_IN_BYTES, hipMemcpyDeviceToHost); /* Free the arrays on the GPU as now we're done with them */ hipFree(gpu_block); hipFree(gpu_thread); hipFree(gpu_warp); hipFree(gpu_calc_thread); /* Iterate through the arrays and print */ for (i = 0; i < ARRAY_SIZE; i++) { if(i>0) { if(cpu_warp[i] == 1 && cpu_warp[i-1] == 0) printf("\n"); if(cpu_block[i] == 1 && cpu_block[i-1] ==0) printf("\n\n"); } printf("Calculated Thread: %3u - Block: %2u - Warp %2u - Thread %3u\n", cpu_calc_thread[i], cpu_block[i], cpu_warp[i], cpu_thread[i]); } /* To avoid program exit automatically */ ch = getchar(); }
f7ad30a9be11f731550d63576598812bc17c656d.cu
// program name: cudaThreadBlock.cu // this program is designed for showing thread block example. // author: Shane Cook (Nvidia .Inc) // modified by Yang Yang @ Peking University July 2017 // // // Sketch diagram for thread blocks: // ----------------------------------------------------------------------------------- // | thread block 0 || thread block 0 || thread block 1 || thread block 1 || // | thread bundle 0 || thread bundle 1 || thread bundle 0 || thread bundle 1 || // | (thread 0~31) || (thread 32~63) || (thread 64~95) || (thread 96~127) || // ----------------------------------------------------------------------------------- // // Adress space: // ----------------------------------------------------------------------------------- // | || || || || // | adress || adress || adress || adress || // | (0~31) || (32~63) || (64~95) || (96~127) || // ----------------------------------------------------------------------------------- // head files #include <stdio.h> #include <stdlib.h> #include <conio.h> /* Cuda Kernel function: waht is my id */ __global__ void what_is_my_id(unsigned int * const block, unsigned int * const thread, unsigned int * const warp, unsigned int * const calc_thread) { /* Thread id equals to block index * block size + thread offset into the block */ const unsigned int thread_idx = (blockIdx.x * blockDim.x) + threadIdx.x; block[thread_idx] = blockIdx.x; thread[thread_idx] = threadIdx.x; /* Calculate warp using built in variable warpSize */ warp[thread_idx] = threadIdx.x / warpSize; calc_thread[thread_idx] = thread_idx; } #define ARRAY_SIZE 128 #define ARRAY_SIZE_IN_BYTES (sizeof(unsigned int) * ARRAY_SIZE) /* Declare statically four arrays of ARRAY_SIZE each */ unsigned int cpu_block[ARRAY_SIZE]; unsigned int cpu_thread[ARRAY_SIZE]; unsigned int cpu_warp[ARRAY_SIZE]; unsigned int cpu_calc_thread[ARRAY_SIZE]; int main(void) { /* Total thread count = 2 * 64 = 128 */ const unsigned int num_blocks = 2; const unsigned int num_threads = 64; char ch; /* Decalre pointers for GPU based params */ unsigned int * gpu_block; unsigned int * gpu_thread; unsigned int * gpu_warp; unsigned int * gpu_calc_thread; /* Declare loop counter for use later */ unsigned int i; /* Allocate four arrays on the GPU */ cudaMalloc((void **)&gpu_block, ARRAY_SIZE_IN_BYTES); // Why here type is (void **)? 
cudaMalloc((void **)&gpu_thread, ARRAY_SIZE_IN_BYTES); cudaMalloc((void **)&gpu_warp, ARRAY_SIZE_IN_BYTES); cudaMalloc((void **)&gpu_calc_thread, ARRAY_SIZE_IN_BYTES); /* Execute our cuda kernel */ what_is_my_id<<<num_blocks, num_threads>>>(gpu_block, gpu_thread, gpu_warp, gpu_calc_thread); /* Copy back the gpu results to the CPU, from display RAM to RAM in physical*/ cudaMemcpy(cpu_block, gpu_block, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_thread, gpu_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_warp, gpu_warp, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost); cudaMemcpy(cpu_calc_thread, gpu_calc_thread, ARRAY_SIZE_IN_BYTES, cudaMemcpyDeviceToHost); /* Free the arrays on the GPU as now we're done with them */ cudaFree(gpu_block); cudaFree(gpu_thread); cudaFree(gpu_warp); cudaFree(gpu_calc_thread); /* Iterate through the arrays and print */ for (i = 0; i < ARRAY_SIZE; i++) { if(i>0) { if(cpu_warp[i] == 1 && cpu_warp[i-1] == 0) printf("\n"); if(cpu_block[i] == 1 && cpu_block[i-1] ==0) printf("\n\n"); } printf("Calculated Thread: %3u - Block: %2u - Warp %2u - Thread %3u\n", cpu_calc_thread[i], cpu_block[i], cpu_warp[i], cpu_thread[i]); } /* To avoid program exit automatically */ ch = getchar(); }
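The comment in the sample asks why the cudaMalloc argument is cast to (void **): cudaMalloc writes the device address through a generic void** out-parameter, so call sites cast the address of their typed pointer. A small type-safe wrapper that hides the cast is sketched below; it is an illustrative helper, not part of the original program.

#include <cuda_runtime.h>

// Allocate `count` elements of T on the device without a (void**) cast at the call site.
template <typename T>
cudaError_t mallocDevice(T** ptr, size_t count) {
    return cudaMalloc(reinterpret_cast<void**>(ptr), count * sizeof(T));
}

// Example use: mallocDevice(&gpu_block, ARRAY_SIZE);  // same bytes as ARRAY_SIZE_IN_BYTES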
82924628e3781928fc7051a5ef188b1344305ee9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" //===------------------ GeantX --------------------------------------------===// // // Geant Exascale Pilot // // For the licensing terms see LICENSE file. // For the list of contributors see CREDITS file. // Copyright (C) 2019, Geant Exascale Pilot team, All rights reserved. //===----------------------------------------------------------------------===// #include "backend/cuda/Interface.h" #include "base/Map.h" class MyClass { private: float fData; public: VECCORE_ATT_HOST_DEVICE MyClass() { fData = 0; } VECCORE_ATT_HOST_DEVICE MyClass(float data) { fData = data; } VECCORE_ATT_HOST_DEVICE float getData() const { return fData; } }; __global__ void testNew(vecgeom::map<double, MyClass> *devMap, double *key, int N) { for (int i = 0; i < N; i++) { MyClass key1 = (*devMap)[key[i]]; MyClass key2 = devMap->find(key[i])->second; // printf("Key %f, Value from op[] = %f and from find %f\n",key[i],key1, key2); } } __global__ void rebuildMap(vecgeom::map<double, MyClass> *devMap, double *key, MyClass *value, int N) { // vecgeom::map<double,double> *myDevMap = new vecgeom::map<double, double>; // for (int i=0;i<N;i++) // std::cout<<" i "<<value[i]<<std::endl; for (int i = 0; i < N; i++) { (*devMap)[key[i]] = value[i]; printf(" REBUILDING key %f and value %f from op[]\n ", key[i], ((*devMap)[key[i]]).getData()); auto search = devMap->find(key[i]); printf(" REBUILDING key %f and value %f from find\n ", key[i], (search->second).getData()); } } namespace vecgeom { namespace cxx { template size_t DevicePtr<MyClass>::SizeOf(); template void DevicePtr<MyClass>::Construct() const; template size_t DevicePtr<cuda::map<double, MyClass>>::SizeOf(); template void DevicePtr<cuda::map<double, MyClass>>::Construct() const; } // namespace cxx } // namespace vecgeom void launchTestNew(vecgeom::cxx::DevicePtr<vecgeom::cuda::map<double, MyClass>> &devMap, vecgeom::cxx::DevicePtr<double> key, int N, int nBlocks, int nThreads) { int threadsPerBlock = nThreads; int blocksPerGrid = nBlocks; hipLaunchKernelGGL(( testNew), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devMap, key, N); } void launchRebuildMap( vecgeom::cxx::DevicePtr<vecgeom::cuda::map<double, MyClass>> &devMap, vecgeom::cxx::DevicePtr<double> key, vecgeom::cxx::DevicePtr<MyClass> value, int N, int nBlocks, int nThreads) { int threadsPerBlock = nThreads; int blocksPerGrid = nBlocks; hipLaunchKernelGGL(( rebuildMap), dim3(blocksPerGrid), dim3(threadsPerBlock), 0, 0, devMap, key, value, N); }
82924628e3781928fc7051a5ef188b1344305ee9.cu
//===------------------ GeantX --------------------------------------------===// // // Geant Exascale Pilot // // For the licensing terms see LICENSE file. // For the list of contributors see CREDITS file. // Copyright (C) 2019, Geant Exascale Pilot team, All rights reserved. //===----------------------------------------------------------------------===// #include "backend/cuda/Interface.h" #include "base/Map.h" class MyClass { private: float fData; public: VECCORE_ATT_HOST_DEVICE MyClass() { fData = 0; } VECCORE_ATT_HOST_DEVICE MyClass(float data) { fData = data; } VECCORE_ATT_HOST_DEVICE float getData() const { return fData; } }; __global__ void testNew(vecgeom::map<double, MyClass> *devMap, double *key, int N) { for (int i = 0; i < N; i++) { MyClass key1 = (*devMap)[key[i]]; MyClass key2 = devMap->find(key[i])->second; // printf("Key %f, Value from op[] = %f and from find %f\n",key[i],key1, key2); } } __global__ void rebuildMap(vecgeom::map<double, MyClass> *devMap, double *key, MyClass *value, int N) { // vecgeom::map<double,double> *myDevMap = new vecgeom::map<double, double>; // for (int i=0;i<N;i++) // std::cout<<" i "<<value[i]<<std::endl; for (int i = 0; i < N; i++) { (*devMap)[key[i]] = value[i]; printf(" REBUILDING key %f and value %f from op[]\n ", key[i], ((*devMap)[key[i]]).getData()); auto search = devMap->find(key[i]); printf(" REBUILDING key %f and value %f from find\n ", key[i], (search->second).getData()); } } namespace vecgeom { namespace cxx { template size_t DevicePtr<MyClass>::SizeOf(); template void DevicePtr<MyClass>::Construct() const; template size_t DevicePtr<cuda::map<double, MyClass>>::SizeOf(); template void DevicePtr<cuda::map<double, MyClass>>::Construct() const; } // namespace cxx } // namespace vecgeom void launchTestNew(vecgeom::cxx::DevicePtr<vecgeom::cuda::map<double, MyClass>> &devMap, vecgeom::cxx::DevicePtr<double> key, int N, int nBlocks, int nThreads) { int threadsPerBlock = nThreads; int blocksPerGrid = nBlocks; testNew<<<blocksPerGrid, threadsPerBlock>>>(devMap, key, N); } void launchRebuildMap( vecgeom::cxx::DevicePtr<vecgeom::cuda::map<double, MyClass>> &devMap, vecgeom::cxx::DevicePtr<double> key, vecgeom::cxx::DevicePtr<MyClass> value, int N, int nBlocks, int nThreads) { int threadsPerBlock = nThreads; int blocksPerGrid = nBlocks; rebuildMap<<<blocksPerGrid, threadsPerBlock>>>(devMap, key, value, N); }
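Both wrappers above launch their kernel and return without checking whether the launch or the kernel itself failed. A hedged sketch of a post-launch check that could follow each launch is below; the helper name is illustrative.

#include <cstdio>
#include <cuda_runtime.h>

// Report launch-configuration errors and asynchronous errors raised while the kernel ran.
inline void checkLastLaunch(const char* what) {
    cudaError_t err = cudaGetLastError();
    if (err == cudaSuccess) err = cudaDeviceSynchronize();
    if (err != cudaSuccess)
        fprintf(stderr, "%s failed: %s\n", what, cudaGetErrorString(err));
}

// e.g. after rebuildMap<<<blocksPerGrid, threadsPerBlock>>>(devMap, key, value, N):
//     checkLastLaunch("rebuildMap");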
464ad16488c620342cd7de20744672cec1bb0a5a.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <type_traits> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/NestedTensorImpl.h> #include <ATen/TensorAccessor.h> #include <c10/util/Logging.h> #include <ATen/hip/HIPContext.h> #include <ATen/hip/detail/KernelUtils.h> #include <ATen/hip/detail/IndexUtils.cuh> #include <ATen/native/NonSymbolicBC.h> #include <ATen/native/hip/Loops.cuh> #include <ATen/native/hip/MemoryAccess.cuh> #include <ATen/native/hip/PersistentSoftmax.cuh> #include <ATen/native/hip/block_reduce.cuh> #include <c10/hip/HIPMathCompat.h> #include <ATen/native/transformers/attention.h> #include <ATen/native/nested/NestedTensorUtils.h> #include <ATen/native/nested/NestedTensorTransformerFunctions.h> #include <ATen/native/nested/NestedTensorUtils.h> #include <ATen/native/transformers/hip/sdp_utils.h> #ifdef USE_FLASH_ATTENTION #include <ATen/native/transformers/hip/flash_attn/fmha_api.h> #include <ATen/native/transformers/hip/mem_eff_attention/kernel_forward.h> #endif namespace at { namespace native { namespace { #define DISPATCH_BLOCKSIZE(VALUE_HEAD_DIM, FN) \ { \ if (VALUE_HEAD_DIM <= 64) { \ constexpr bool kIs64x64 = true; \ constexpr bool kSingleValueIteration = true; \ FN(); \ } else { \ constexpr bool kIs64x64 = false; \ if (VALUE_HEAD_DIM <= 128) { \ constexpr bool kSingleValueIteration = true; \ FN(); \ } else { \ constexpr bool kSingleValueIteration = false; \ FN(); \ } \ } \ } #define DISPATCH_KERNEL(QUERY, KEY, VALUE, FUNC) \ { \ hipDeviceProp_t* properties = \ at::cuda::getDeviceProperties(QUERY.device().index()); \ const int computeCapability = properties->major * 10 + properties->minor; \ DISPATCH_BLOCKSIZE( \ VALUE.size(-1), ([&]() { \ static constexpr int64_t kQueriesPerBlock = kIs64x64 ? 64 : 32; \ static constexpr int64_t kKeysPerBlock = kIs64x64 ? 64 : 128; \ DISPATCH_TYPES( \ QUERY, ([&]() { \ DISPATCH_ARCHTAG( \ computeCapability, ([&]() { \ using AlignedAK = AttentionKernel< \ scalar_t, \ ArchTag, \ true, \ kQueriesPerBlock, \ kKeysPerBlock, \ kSingleValueIteration>; \ /* Run a more efficient kernel (with `isAligned=True`) \ if memory is correctly aligned*/ \ bool isAligned = \ (QUERY.stride(2) % AlignedAK::kAlignmentQ == 0 && \ KEY.stride(2) % AlignedAK::kAlignmentK == 0 && \ VALUE.stride(2) % AlignedAK::kAlignmentV == 0); \ /* TODO: Should we warn or log somewhere when we use a \ less efficient kernel due to wrong alignment? */ \ DISPATCH_BOOL(isAligned, kIsAligned, ([&]() { \ using Kernel = AttentionKernel< \ scalar_t, \ ArchTag, \ kIsAligned, \ kQueriesPerBlock, \ kKeysPerBlock, \ kSingleValueIteration>; \ FUNC(); \ })) \ })) \ })); \ })); \ } static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4; template <typename scalar_t, typename accscalar_t, bool assume_aligned> __global__ void transform_bias_rescale_qkv_kernel( // [B, T, 3 * D] const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv, // [3 * D] const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias, // [3, B, NH, T, DH] PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v, const scalar_t inv_sqrt_dim_per_head) { // warp per DH. // so launch B * NH * T warps. 
auto NH = q_k_v.size(2); auto T = q_k_v.size(3); auto DH = q_k_v.size(4); auto t = blockIdx.x % T; auto b = blockIdx.x / T; auto D = NH * DH; if (assume_aligned) { constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC; using LoadT = memory::aligned_vector<scalar_t, VEC>; for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) { auto d = d_v * VEC; auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q[VEC]; scalar_t qkv_bias_k[VEC]; scalar_t qkv_bias_v[VEC]; scalar_t qkv_q[VEC]; scalar_t qkv_k[VEC]; scalar_t qkv_v[VEC]; // Here we require D % VEC == 0 for these vectorized loads. *reinterpret_cast<LoadT*>(&qkv_bias_q) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_k) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_v) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]); *reinterpret_cast<LoadT*>(&qkv_q) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_k) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_v) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]); #pragma unroll // TODO: specialize for float2half2/half2float2? for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } // Here we require DH % VEC == 0 for these vectorized stores. *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_q); *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_k); *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_v); } } else { // Same as above, but we can't vectorize memory access. for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q = qkv_bias[d + 0 * D]; scalar_t qkv_bias_k = qkv_bias[d + 1 * D]; scalar_t qkv_bias_v = qkv_bias[d + 2 * D]; scalar_t qkv_q = qkv[b][t][d + 0 * D]; scalar_t qkv_k = qkv[b][t][d + 1 * D]; scalar_t qkv_v = qkv[b][t][d + 2 * D]; qkv_q = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q) + static_cast<accscalar_t>(qkv_bias_q)) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k) + static_cast<accscalar_t>(qkv_bias_k))); qkv_v = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v) + static_cast<accscalar_t>(qkv_bias_v))); q_k_v[0][b][nh][t][dh] = qkv_q; q_k_v[1][b][nh][t][dh] = qkv_k; q_k_v[2][b][nh][t][dh] = qkv_v; } } } template <typename scalar_t, typename accscalar_t, bool assume_aligned = false> __global__ void transform_bias_rescale_qkv_add_padding_kernel( // [B, T, 3 * D], but it's a NestedTensor buffer const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv, // [3 * D] const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias, const int* offsets, const int* input_sizes, // [3, B, NH, T, DH] PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v, const scalar_t inv_sqrt_dim_per_head) { // warp per DH. // so launch B * NH * T warps. 
const auto NH = q_k_v.size(2); const auto T = q_k_v.size(3); const auto DH = q_k_v.size(4); const auto t = blockIdx.x % T; const auto b = blockIdx.x / T; const auto D = NH * DH; const auto _3D = 3 * D; const auto offset_for_batch = offsets[b]; const auto input_dim = 1; const auto* sizes_i = input_sizes + b * input_dim; if (assume_aligned) { constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC; using LoadT = memory::aligned_vector<scalar_t, VEC>; for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) { auto d = d_v * VEC; auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q[VEC]; scalar_t qkv_bias_k[VEC]; scalar_t qkv_bias_v[VEC]; scalar_t qkv_q[VEC]; scalar_t qkv_k[VEC]; scalar_t qkv_v[VEC]; const auto first_item_offset = t * _3D + d; const auto last_item_offset = first_item_offset + VEC - 1; const bool first_item_in_bounds = first_item_offset < sizes_i[0]; const bool entire_vec_in_bounds = last_item_offset < sizes_i[0]; // Here we require D % VEC == 0 for these vectorized loads. *reinterpret_cast<LoadT*>(&qkv_bias_q) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_k) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_v) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]); if (entire_vec_in_bounds) { const auto offset = offset_for_batch + first_item_offset; *reinterpret_cast<LoadT*>(&qkv_q) = *reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_k) = *reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_v) = *reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]); #pragma unroll // TODO: specialize for float2half2/half2float2? for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } } else if (first_item_in_bounds) { const auto offset = offset_for_batch + first_item_offset; qkv_q[0] = qkv[offset + 0 * D]; qkv_k[0] = qkv[offset + 1 * D]; qkv_v[0] = qkv[offset + 2 * D]; qkv_q[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[0]) + static_cast<accscalar_t>(qkv_bias_q[0])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[0]) + static_cast<accscalar_t>(qkv_bias_k[0]))); qkv_v[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[0]) + static_cast<accscalar_t>(qkv_bias_v[0]))); #pragma unroll for (auto ii = 1; ii < VEC; ++ii) { const auto loop_offset = offset + ii; if (loop_offset < sizes_i[0]) { qkv_q[ii] = qkv[loop_offset + 0 * D]; qkv_k[ii] = qkv[loop_offset + 1 * D]; qkv_v[ii] = qkv[loop_offset + 2 * D]; qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } else { qkv_q[ii] = 0; qkv_k[ii] = 0; qkv_v[ii] = 0; } } } else { #pragma unroll for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = 0; qkv_k[ii] = 0; qkv_v[ii] = 0; } } // Here 
we require DH % VEC == 0 for these vectorized stores. *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_q); *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_k); *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_v); } } else { for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q = qkv_bias[d + 0 * D]; scalar_t qkv_bias_k = qkv_bias[d + 1 * D]; scalar_t qkv_bias_v = qkv_bias[d + 2 * D]; const auto item_offset = t * _3D + d; const bool in_bounds = item_offset < sizes_i[0]; scalar_t qkv_q, qkv_k, qkv_v; if (in_bounds) { const auto qkv_offset = offset_for_batch + item_offset; qkv_q = qkv[qkv_offset + 0 * D]; qkv_k = qkv[qkv_offset + 1 * D]; qkv_v = qkv[qkv_offset + 2 * D]; qkv_q = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q) + static_cast<accscalar_t>(qkv_bias_q)) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k) + static_cast<accscalar_t>(qkv_bias_k))); qkv_v = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v) + static_cast<accscalar_t>(qkv_bias_v))); } else { qkv_q = 0; qkv_k = 0; qkv_v = 0; } q_k_v[0][b][nh][t][dh] = qkv_q; q_k_v[1][b][nh][t][dh] = qkv_k; q_k_v[2][b][nh][t][dh] = qkv_v; } } } Tensor collapse_dims_1_and_2(const Tensor& sizes) { auto sizes_dim1 = at::native::narrow_symint(sizes, 1, 0, 1); auto sizes_dim2 = at::native::narrow_symint(sizes, 1, 1, 1); return (sizes_dim1 * sizes_dim2).contiguous(); } } // namespace // compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias __host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda( const Tensor& qkv, const Tensor& qkv_bias, const int64_t num_head) { auto B = qkv.is_nested() ? get_nested_tensor_impl(qkv)->get_nested_size_tensor().size(0) : qkv.size(0); // TODO: calculate this without the std::vector -- NestedTensor_to_mask wants // this too auto T = qkv.is_nested() ? NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0] : qkv.size(1); if (qkv.is_nested()) { // Don't mess with non-nested case for now since it's not set up to fiddle // with mask size. // Round T up to next multiple of 8 so as to be able to utilize Tensor // cores. Otherwise, sometimes with padding, *no* row will have the maximum // sequence length and so we'll have a non-divisible-by-8 dimension even if // the model author chose a multiple of 8. 
T = T + (8 - (T % 8)) % 8; } auto _3D = qkv_bias.size(0); auto D = _3D / 3; TORCH_CHECK(D % num_head == 0); const auto dim_per_head = D / num_head; auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options()); #define CALL_KERNEL(assume_aligned) \ hipLaunchKernelGGL(( transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned>) \ , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(), \ qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \ 1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head))) #define CALL_ADD_PADDING_KERNEL(assume_aligned) \ hipLaunchKernelGGL(( transform_bias_rescale_qkv_add_padding_kernel< \ scalar_t, \ accscalar_t, \ assume_aligned>) \ , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), \ nt_qkv_buffer \ .packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ offsets_ptr, \ sizes_ptr, \ q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \ 1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head))) AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, qkv.scalar_type(), "transform_bias_rescale_qkv", [&] { using accscalar_t = acc_type<scalar_t, true>; auto threads = ::max( std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1); auto blocks = B * T; const bool aligned = ((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) && ((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) % TRANSFORM_BIAS_RESCALE_VEC) == 0); if (aligned) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY( D % TRANSFORM_BIAS_RESCALE_VEC == 0, "D = num_heads * dim_per_head, so we should have dim_per_head % " "TRANSFORM_BIAS_RESCALE_VEC == 0 => " "D % TRANSFORM_BIAS_RESCALE_VEC == 0"); } if (qkv.is_nested()) { auto* nt_qkv = get_nested_tensor_impl(qkv); const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer(); auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_size_tensor()); auto offsets = NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel()); at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel()) .copy_(sizes.reshape({-1})); auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true); const auto offsets_ptr = metadata.data_ptr<int>(); const auto sizes_ptr = offsets_ptr + sizes.numel() + 1; const auto input_dim = sizes.sizes()[1]; TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1); if (aligned && ((reinterpret_cast<intptr_t>(qkv.data_ptr()) % TRANSFORM_BIAS_RESCALE_VEC) == 0)) { CALL_ADD_PADDING_KERNEL(true); } else { CALL_ADD_PADDING_KERNEL(false); } } else if (aligned) { CALL_KERNEL(true); } else { CALL_KERNEL(false); } C10_HIP_KERNEL_LAUNCH_CHECK(); }); #undef CALL_ADD_PADDING_KERNEL #undef CALL_KERNEL auto q_k_v_s = at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0); return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]); } std::tuple<Tensor, Tensor> native_multi_head_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, const int64_t embed_dim, const int64_t num_head, const Tensor& qkv_weight, const Tensor& qkv_bias, const Tensor& proj_weight, const Tensor& proj_bias, const c10::optional<Tensor>& mask, bool need_weights, bool average_attn_weights, const c10::optional<int64_t> mask_type) { // query shape: [B, T, D] // qkv_weight shape: [3 * D, D] TORCH_CHECK( !mask || !query.is_nested(), "NestedTensor with mask is not supported 
yet"); const auto D = embed_dim; TORCH_CHECK( query.dim() == 3, "expected 3-D `query`, got ", query.dim(), "-D tensor"); TORCH_CHECK( query.is_nested() || query.sizes()[2] == embed_dim, "passed-in embed_dim ", embed_dim, " didn't match last dim of query ", query.sizes()[2]); TORCH_CHECK( key.dim() == 3, "expected 3-D `key`, got ", key.dim(), "-D tensor"); TORCH_CHECK( value.dim() == 3, "expected 3-D `value`, got ", value.dim(), "-D tensor"); TORCH_CHECK( query.is_nested() || key.is_nested() || value.is_nested() || (query.sizes() == key.sizes() && key.sizes() == value.sizes()), "expected `query`/`key`/`value` shapes to match"); TORCH_CHECK( qkv_weight.dim() == 2, "expected 2-D `qkv_weight`, got ", qkv_weight.dim(), "-D tensor"); TORCH_CHECK( D * 3 == qkv_weight.sizes()[0], "expected `qkv_weight` first dim to be 3x embed_dim"); TORCH_CHECK( D == qkv_weight.sizes()[1], "expected `qkv_weight` second dim to be embed_Dim"); TORCH_CHECK( qkv_bias.dim() == 1, "expected 2-D `qkv_bias`, got ", qkv_bias.dim(), "-D tensor"); TORCH_CHECK( qkv_bias.sizes()[0] == 3 * D, "expected `qkv_bias` first dim and first dim of query to be equal"); TORCH_CHECK(D % num_head == 0, "`embed_dim` must divide evenly by `num_heads`"); #ifndef NDEBUG const auto B = query.is_nested() ? get_nested_tensor_impl(query)->get_nested_size_tensor().size(0) : query.sizes()[0]; auto T = query.is_nested() ? 0 : query.sizes()[1]; #endif const auto dim_per_head = D / num_head; if ((query.is_same(key) && key.is_same(value)) && dim_per_head % 8 == 0 && !need_weights) { // We have not done linear projection yet but the input for SDP // Is expected to be 4 dimensional. We "cheaply" create view tensors // That will then be used for checking hot path conditions with select_sd_backend auto q = query.view({query.size(0), -1, num_head, dim_per_head}).transpose(1, 2); auto k = key.view({key.size(0), -1, num_head, dim_per_head}).transpose(1, 2); auto v = value.view({value.size(0), -1, num_head, dim_per_head}).transpose(1, 2); sdp::sdp_params kernel_params{q, k, v, mask.has_value(), 0.0, false}; auto backend = select_sdp_backend(kernel_params); if (backend == sdp::SDPBackend::flash_attention || backend == sdp::SDPBackend::efficient_attention) { auto x = at::linear(query, qkv_weight, qkv_bias); auto chunks = x.chunk(3, -1); auto x_size_0 = x.size(0); chunks[0] = (chunks[0].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); chunks[1] = (chunks[1].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); chunks[2] = (chunks[2].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); auto y = at::scaled_dot_product_attention( chunks[0], chunks[1], chunks[2], mask, 0.0, false); auto past_sdp = y.transpose(1, 2).reshape({x_size_0, -1, embed_dim}); return std::make_tuple( at::linear(past_sdp, proj_weight, proj_bias), Tensor()); } // Returned math or error lets not use it } // shape: [B, T, 3 x D] auto qkv = qkv_projection(query, key, value, embed_dim, qkv_weight); if (!qkv.is_nested() && qkv.numel() == 0) { if (query.is_nested()) { return std::make_tuple(Tensor(), Tensor()); } return std::make_tuple(at::empty_like(query), Tensor()); } #ifndef NDEBUG if (!query.is_nested() || !qkv.is_nested()) { if (query.is_nested()) { T = qkv.size(1); } debug_assert_shape(__LINE__, qkv, {B, T, 3 * D}); } #endif #ifdef DEBUG_PRINT_EACH_STEP if (!qkv.is_nested()) { std::cerr << "qkv: " << qkv << std::endl; } #endif // shape: 3 x [B, num_head, T, dim_per_head] auto q_k_v = _transform_bias_rescale_qkv(qkv, qkv_bias, num_head); qkv = Tensor(); // Not 
used any more, allow free auto& q = std::get<0>(q_k_v); const auto& k = std::get<1>(q_k_v); const auto& v = std::get<2>(q_k_v); #ifndef NDEBUG debug_assert_shape(__LINE__, q, {B, num_head, T, dim_per_head}); debug_assert_shape(__LINE__, k, {B, num_head, T, dim_per_head}); debug_assert_shape(__LINE__, v, {B, num_head, T, dim_per_head}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "q: " << q << std::endl; std::cerr << "k: " << k << std::endl; std::cerr << "v: " << v << std::endl; #endif // shape: [B, num_head, T, T] auto qkt = bmm_nt(q, k); // q & k are dead but cannot be freed because they were packed with v #ifndef NDEBUG debug_assert_shape(__LINE__, qkt, {B, num_head, T, T}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "qkt: " << qkt << std::endl; #endif // shape: [B, num_head, T, T] // TODO: long-term, have a kernel that works with // NestedTensor directly if there is no mask passed qkt = masked_softmax(qkt, mask, query, mask_type); #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "qkt after softmax: " << qkt << std::endl; #endif // shape: [B, num_head, T, dim_per_head] // reuse storage for q; we're done with it auto attn_ctx = bmm_nn(q, qkt, v); // qkv is not dead; we just reused storage for q! if (!need_weights) { qkt = Tensor(); } #ifndef NDEBUG debug_assert_shape(__LINE__, attn_ctx, {B, num_head, T, dim_per_head}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "attn_ctx: " << attn_ctx << std::endl; #endif // shape: [B, T, D] // Fuse transform_0213 inside auto proj = transform0213_gemm_nt_bias( attn_ctx, proj_weight, proj_bias, query); #ifndef NDEBUG debug_assert_shape(__LINE__, proj, {B, T, D}); #endif if (need_weights && average_attn_weights) { // weights are not needed for full transformer, so don't worry too // much about performance -- we implement this just to make use // cases that don't disable need_weights still get some speedup. 
qkt = qkt.sum(1); qkt /= num_head; } return std::make_tuple(std::move(proj), std::move(qkt)); } std::tuple<Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, int64_t, int64_t, Tensor> _scaled_dot_product_flash_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, double dropout_p, bool is_causal, bool return_debug_mask) { // Used for tracking usage statistics C10_LOG_API_USAGE_ONCE("torch.sdpa.flash_attention"); // Query (Batch x Num_heads x Q_seq_len x Dim_per_head) // Key (Batch x Num_heads x KV_seq_len x Dim_per_head) // Value (Batch x Num_heads x KV_seq_len x Dim_per_head) const int64_t batch_size = query.size(0); const int64_t num_heads = query.size(1); const int64_t max_seqlen_batch_q = query.size(2); const int64_t head_dim = query.size(3); const int64_t max_seqlen_batch_k = key.size(2); const int64_t max_seqlen_batch_v = value.size(2); TORCH_CHECK( max_seqlen_batch_k == max_seqlen_batch_v, "Key and Value must have the same sequence length"); // Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head) // Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head) // Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head) Tensor q_t = query.transpose(1, 2); Tensor k_t = key.transpose(1, 2); Tensor v_t = value.transpose(1, 2); Tensor cumulative_sequence_length_q = at::arange( 0, (batch_size + 1) * max_seqlen_batch_q, max_seqlen_batch_q, TensorOptions().device(at::kCUDA).dtype(at::kInt)); Tensor cumulative_sequence_length_k = at::arange( 0, (batch_size + 1) * max_seqlen_batch_k, max_seqlen_batch_k, TensorOptions().device(at::kCUDA).dtype(at::kInt)); int64_t Nnz_q{batch_size * max_seqlen_batch_q}; int64_t Nnz_kv{batch_size * max_seqlen_batch_k}; // For the standard MHA these will actually be views Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim}); Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim}); Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim}); Tensor attention, log_sumexp, debug_attn_mask; int64_t philox_seed{0}, philox_offset{0}; std::tie(attention, log_sumexp, philox_seed, philox_offset, debug_attn_mask) = at::_flash_attention_forward( query_reshaped, key_reshaped, value_reshaped, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, dropout_p, is_causal, return_debug_mask); // Reshape output to convert nnz to batch_size and seq_len attention = attention.view({batch_size, max_seqlen_batch_q, num_heads, head_dim}).transpose(1,2); return std::make_tuple(attention, log_sumexp, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, philox_seed, philox_offset, debug_attn_mask); } std::tuple<Tensor, Tensor> _scaled_dot_product_efficient_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, bool compute_log_sumexp, bool is_causal) { // Used for tracking usage statistics C10_LOG_API_USAGE_ONCE("torch.sdpa.mem_efficient_attention"); // Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head) // Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head) // Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head) Tensor q_t = query.transpose(1, 2); Tensor k_t = key.transpose(1, 2); Tensor v_t = value.transpose(1, 2); Tensor attention, log_sumexp; std::tie(attention, log_sumexp) = at::_efficient_attention_forward( q_t, k_t, v_t, c10::nullopt, c10::nullopt, c10::nullopt, compute_log_sumexp, is_causal); attention = attention.transpose(1,2); return std::make_tuple(std::move(attention), 
std::move(log_sumexp)); } int64_t _fused_sdp_choice_cuda(const Tensor& query_, const Tensor& key, const Tensor& value, const c10::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal){ sdp::sdp_params kernel_params{query_, key, value, attn_mask_.has_value(), dropout_p, is_causal}; auto backend = select_sdp_backend(kernel_params); if (backend == sdp::SDPBackend::error) { TORCH_CHECK( false, "No viable backend for scaled_dot_product_attention was found. ", "This is likely due to turning off both the math kernel and the fused kernels."); } return static_cast<int64_t>(backend); } bool _chunk_grad_outputs_efficient_attention( const Tensor& query, const Tensor& key, const Tensor& value, bool is_causal) { int64_t M = query.size(2); int64_t N = key.size(2); bool grad_kv_needs_init = is_causal && N > M; bool is_aliased = query.storage().is_alias_of(key.storage()) && query.storage().is_alias_of(value.storage()); bool equal_seq_len = query.size(2) == key.size(2); bool q_v_same_head_dim = query.size(3) == value.size(3); bool chunk_grad_outputs = (!grad_kv_needs_init && equal_seq_len && q_v_same_head_dim && is_aliased); return chunk_grad_outputs; } std::tuple<Tensor, Tensor, int64_t, int64_t, Tensor> _flash_attention_forward( const Tensor& query, const Tensor& key, const Tensor& value, const Tensor& cumulative_sequence_length_q, const Tensor& cumulative_sequence_length_k, const int64_t max_seqlen_batch_q, const int64_t max_seqlen_batch_k, double dropout_p, bool is_causal, bool return_debug_mask) { #if defined(USE_FLASH_ATTENTION) /* num_splits determines how much to parallelize over the seqlen_q dimension num_splits=0 means it will be set by an internal heuristic. We're exposing num_splits mostly for benchmarking. We will hard code it to 0 for now */ constexpr int num_splits{0}; auto softmax_scale = ::pow(query.size(-1), -0.5); at::Tensor output = at::empty_like(query); Tensor logsumexp, debug_attn_mask; uint64_t philox_seed{0}, philox_offset{0}; std::tie(logsumexp, philox_seed, philox_offset, debug_attn_mask) = fmha::mha_fwd( query, key, value, output, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, dropout_p, softmax_scale, false, /*zero_tensors = false for all calls here*/ is_causal, return_debug_mask, /*return_softmax (this is used for testing)*/ num_splits); debug_attn_mask = return_debug_mask ? 
debug_attn_mask : at::empty({0}, query.options()); int64_t signed_philox_seed = sdp::bit_cast<int64_t>(philox_seed); int64_t signed_philox_offset= sdp::bit_cast<int64_t>(philox_offset); return std::make_tuple(output, logsumexp, signed_philox_seed, signed_philox_offset, debug_attn_mask); #endif TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.") return std::make_tuple(Tensor(), Tensor(), 0, 0, Tensor()); } std::tuple<at::Tensor, at::Tensor> _efficient_attention_forward( const at::Tensor& query, // [b, seqlen, num_heads, K] const at::Tensor& key, // [b, seqlen, num_heads, K] const at::Tensor& value, // [b, seqlen, num_heads, Kv] // (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the // position of the first query token for batch $b const c10::optional<at::Tensor>& cu_seqlens_q, // (Mode 1MHK only) [b+1]: cu_seqlens_k[b] contains the // position of the first key token for batch $b const c10::optional<at::Tensor>& cu_seqlens_k, // (Mode 1MHK only) Maximum sequence length across batches const c10::optional<int64_t> max_seqlen_q_, bool compute_logsumexp, bool causal) { #if defined(USE_FLASH_ATTENTION) // TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a // machine that is >= 5.0. In practice, this is not a problem but since // this would avoid runtime architecture checks, we should look into it TORCH_CHECK(query.dim() == 4); TORCH_CHECK(key.dim() == 4); TORCH_CHECK(value.dim() == 4); // Batch sizes TORCH_CHECK(query.size(0) == key.size(0)); TORCH_CHECK(query.size(0) == value.size(0)); // Sequence length TORCH_CHECK(key.size(1) == value.size(1)); // Num heads TORCH_CHECK(query.size(2) == key.size(2)); TORCH_CHECK(query.size(2) == value.size(2)); // Embedding per head TORCH_CHECK(query.size(3) == key.size(3)); int64_t max_seqlen_q = 0, max_seqlen_k=0; TORCH_CHECK(cu_seqlens_q.has_value() == cu_seqlens_k.has_value()); if (cu_seqlens_q.has_value()) { TORCH_CHECK(cu_seqlens_q->scalar_type() == at::ScalarType::Int); TORCH_CHECK(cu_seqlens_k->scalar_type() == at::ScalarType::Int); TORCH_CHECK(cu_seqlens_q->dim() == 1 && cu_seqlens_k->dim() == 1); CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_q)); CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_k)); TORCH_CHECK(cu_seqlens_q->size(0) == cu_seqlens_k->size(0)); TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1"); TORCH_CHECK(max_seqlen_q_.has_value()); max_seqlen_q = *max_seqlen_q_; max_seqlen_k = 0; // Will be set inside the kernel } else { max_seqlen_q = query.size(1); max_seqlen_k = key.size(1); } CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query); CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key); CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value); at::hip::HIPGuardMasqueradingAsCUDA device_guard(query.device()); hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA(); int64_t B = query.size(0); int64_t M = query.size(1); int64_t N = key.size(1); int64_t num_heads = query.size(-2); int64_t K = query.size(-1); int64_t Kv = value.size(-1); at::Tensor res; at::Tensor logsumexp; auto launchKernel = [&](auto _k, int computeCapability) { using Kernel = decltype(_k); using scalar_t = typename Kernel::scalar_t; (void)_k; res = at::empty( {B, M, num_heads, Kv}, query.options().dtype( TypeTraits<typename Kernel::output_t>::atScalarType())); // NOTE: Should be aligned (by padding) in case M is // not a good number for loading during backward constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE; logsumexp = at::empty( {B, num_heads, compute_logsumexp ? 
ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0}, query.options().dtype(at::ScalarType::Float)); typename Kernel::Params p; p.query_ptr = (scalar_t*)query.data_ptr(); p.key_ptr = (scalar_t*)key.data_ptr(); p.value_ptr = (scalar_t*)value.data_ptr(); p.logsumexp_ptr = compute_logsumexp ? (typename Kernel::lse_scalar_t*)logsumexp.data_ptr() : nullptr; at::Tensor output_accum; if (Kernel::kNeedsOutputAccumulatorBuffer) { output_accum = at::empty( {B, M, num_heads, Kv}, query.options().dtype( TypeTraits<typename Kernel::output_accum_t>::atScalarType())); p.output_accum_ptr = (typename Kernel::output_accum_t*)output_accum.data_ptr(); } else { p.output_accum_ptr = nullptr; } p.output_ptr = (typename Kernel::output_t*)res.data_ptr(); if (cu_seqlens_q.has_value()) { p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr(); p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr(); } #define ASSIGN_CHECK_OVERFLOW(A, B) \ { \ A = B; \ TORCH_CHECK(B < std::numeric_limits<decltype(A)>::max(), #B " overflows"); \ } p.num_heads = num_heads; p.head_dim = query.size(3); p.head_dim_value = value.size(3); p.num_queries = max_seqlen_q; p.num_keys = max_seqlen_k; p.num_batches = cu_seqlens_q.has_value() ? cu_seqlens_q->size(0) - 1 : B; p.causal = causal; ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0)); ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0)); ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0)); ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1)); ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1)); ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1)); ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2)); ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2)); ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2)); constexpr auto kernel_fn = attention_kernel_batched<Kernel>; size_t smem_bytes = sizeof(typename Kernel::SharedStorage); if (smem_bytes > 0xc000) { TORCH_INTERNAL_ASSERT( computeCapability >= 70, "This kernel requires too much shared memory on this machine!"); AT_CUDA_CHECK(hipFuncSetAttribute( kernel_fn, hipFuncAttributeMaxDynamicSharedMemorySize, smem_bytes)); } Kernel::check_supported(p); hipLaunchKernelGGL(( kernel_fn), dim3(p.getBlocksGrid()), dim3(p.getThreadsGrid()), smem_bytes, stream, p); }; // Dispatch to the right kernel DISPATCH_KERNEL(query, key, value, ([&]() { launchKernel(Kernel{}, computeCapability); })); AT_CUDA_CHECK(hipGetLastError()); return std::make_tuple(res, logsumexp); #endif TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.") return std::make_tuple(Tensor{}, Tensor{}); } Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){ TORCH_CHECK(false, "This operator should be overridden in python before use"); return at::Tensor(); } REGISTER_CUDA_DISPATCH(_fused_sdp_choice_stub, &_fused_sdp_choice_cuda); } // namespace native } // namespace at
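The transform_bias_rescale_qkv kernels above fold the bias add, the 1/sqrt(dim_per_head) rescale of q, and the repacking of [B, T, 3*D] into 3 x [B, num_head, T, dim_per_head] into a single pass, rounding nested-tensor sequence lengths up to a multiple of 8 for tensor cores. The host-only C++ sketch below only illustrates that rounding and index arithmetic; the helper names (round_up_to_8, split_index) are invented for illustration and are not part of ATen.

// Host-side sketch of the index math used by transform_bias_rescale_qkv_kernel.
// Helper names are illustrative; they do not exist in ATen.
#include <cassert>
#include <cmath>
#include <cstdint>
#include <cstdio>

// Round T up to the next multiple of 8, as done for nested inputs above.
int64_t round_up_to_8(int64_t T) { return T + (8 - (T % 8)) % 8; }

// Map a flat feature index d in [0, D) to (head, within-head) coordinates.
void split_index(int64_t d, int64_t dim_per_head, int64_t* nh, int64_t* dh) {
  *nh = d / dim_per_head;
  *dh = d % dim_per_head;
}

int main() {
  const int64_t num_head = 12, dim_per_head = 64;
  const int64_t D = num_head * dim_per_head;  // 768
  assert(round_up_to_8(37) == 40 && round_up_to_8(40) == 40);

  // Within one (b, t) row of the [B, T, 3*D] buffer, q/k/v components of
  // feature d live at d + 0*D, d + 1*D and d + 2*D respectively.
  const int64_t d = 130;
  int64_t nh = 0, dh = 0;
  split_index(d, dim_per_head, &nh, &dh);
  std::printf("q at %lld, k at %lld, v at %lld -> head %lld, slot %lld\n",
              (long long)(d + 0 * D), (long long)(d + 1 * D),
              (long long)(d + 2 * D), (long long)nh, (long long)dh);

  // q is additionally rescaled by 1/sqrt(dim_per_head) during the repack.
  const double inv_sqrt_dim_per_head = 1.0 / std::sqrt((double)dim_per_head);
  std::printf("inv_sqrt_dim_per_head = %f\n", inv_sqrt_dim_per_head);
  return 0;
}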
464ad16488c620342cd7de20744672cec1bb0a5a.cu
#include <type_traits> #include <ATen/ATen.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <ATen/native/DispatchStub.h> #include <ATen/NestedTensorImpl.h> #include <ATen/TensorAccessor.h> #include <c10/util/Logging.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/cuda/detail/KernelUtils.h> #include <ATen/cuda/detail/IndexUtils.cuh> #include <ATen/native/NonSymbolicBC.h> #include <ATen/native/cuda/Loops.cuh> #include <ATen/native/cuda/MemoryAccess.cuh> #include <ATen/native/cuda/PersistentSoftmax.cuh> #include <ATen/native/cuda/block_reduce.cuh> #include <c10/cuda/CUDAMathCompat.h> #include <ATen/native/transformers/attention.h> #include <ATen/native/nested/NestedTensorUtils.h> #include <ATen/native/nested/NestedTensorTransformerFunctions.h> #include <ATen/native/nested/NestedTensorUtils.h> #include <ATen/native/transformers/cuda/sdp_utils.h> #ifdef USE_FLASH_ATTENTION #include <ATen/native/transformers/cuda/flash_attn/fmha_api.h> #include <ATen/native/transformers/cuda/mem_eff_attention/kernel_forward.h> #endif namespace at { namespace native { namespace { #define DISPATCH_BLOCKSIZE(VALUE_HEAD_DIM, FN) \ { \ if (VALUE_HEAD_DIM <= 64) { \ constexpr bool kIs64x64 = true; \ constexpr bool kSingleValueIteration = true; \ FN(); \ } else { \ constexpr bool kIs64x64 = false; \ if (VALUE_HEAD_DIM <= 128) { \ constexpr bool kSingleValueIteration = true; \ FN(); \ } else { \ constexpr bool kSingleValueIteration = false; \ FN(); \ } \ } \ } #define DISPATCH_KERNEL(QUERY, KEY, VALUE, FUNC) \ { \ cudaDeviceProp* properties = \ at::cuda::getDeviceProperties(QUERY.device().index()); \ const int computeCapability = properties->major * 10 + properties->minor; \ DISPATCH_BLOCKSIZE( \ VALUE.size(-1), ([&]() { \ static constexpr int64_t kQueriesPerBlock = kIs64x64 ? 64 : 32; \ static constexpr int64_t kKeysPerBlock = kIs64x64 ? 64 : 128; \ DISPATCH_TYPES( \ QUERY, ([&]() { \ DISPATCH_ARCHTAG( \ computeCapability, ([&]() { \ using AlignedAK = AttentionKernel< \ scalar_t, \ ArchTag, \ true, \ kQueriesPerBlock, \ kKeysPerBlock, \ kSingleValueIteration>; \ /* Run a more efficient kernel (with `isAligned=True`) \ if memory is correctly aligned*/ \ bool isAligned = \ (QUERY.stride(2) % AlignedAK::kAlignmentQ == 0 && \ KEY.stride(2) % AlignedAK::kAlignmentK == 0 && \ VALUE.stride(2) % AlignedAK::kAlignmentV == 0); \ /* TODO: Should we warn or log somewhere when we use a \ less efficient kernel due to wrong alignment? */ \ DISPATCH_BOOL(isAligned, kIsAligned, ([&]() { \ using Kernel = AttentionKernel< \ scalar_t, \ ArchTag, \ kIsAligned, \ kQueriesPerBlock, \ kKeysPerBlock, \ kSingleValueIteration>; \ FUNC(); \ })) \ })) \ })); \ })); \ } static constexpr int TRANSFORM_BIAS_RESCALE_VEC = 4; template <typename scalar_t, typename accscalar_t, bool assume_aligned> __global__ void transform_bias_rescale_qkv_kernel( // [B, T, 3 * D] const PackedTensorAccessor64<scalar_t, 3, RestrictPtrTraits> qkv, // [3 * D] const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias, // [3, B, NH, T, DH] PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v, const scalar_t inv_sqrt_dim_per_head) { // warp per DH. // so launch B * NH * T warps. 
auto NH = q_k_v.size(2); auto T = q_k_v.size(3); auto DH = q_k_v.size(4); auto t = blockIdx.x % T; auto b = blockIdx.x / T; auto D = NH * DH; if (assume_aligned) { constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC; using LoadT = memory::aligned_vector<scalar_t, VEC>; for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) { auto d = d_v * VEC; auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q[VEC]; scalar_t qkv_bias_k[VEC]; scalar_t qkv_bias_v[VEC]; scalar_t qkv_q[VEC]; scalar_t qkv_k[VEC]; scalar_t qkv_v[VEC]; // Here we require D % VEC == 0 for these vectorized loads. *reinterpret_cast<LoadT*>(&qkv_bias_q) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_k) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_v) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]); *reinterpret_cast<LoadT*>(&qkv_q) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_k) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_v) = *reinterpret_cast<const LoadT*>(&qkv[b][t][d + 2 * D]); #pragma unroll // TODO: specialize for float2half2/half2float2? for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } // Here we require DH % VEC == 0 for these vectorized stores. *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_q); *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_k); *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_v); } } else { // Same as above, but we can't vectorize memory access. for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q = qkv_bias[d + 0 * D]; scalar_t qkv_bias_k = qkv_bias[d + 1 * D]; scalar_t qkv_bias_v = qkv_bias[d + 2 * D]; scalar_t qkv_q = qkv[b][t][d + 0 * D]; scalar_t qkv_k = qkv[b][t][d + 1 * D]; scalar_t qkv_v = qkv[b][t][d + 2 * D]; qkv_q = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q) + static_cast<accscalar_t>(qkv_bias_q)) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k) + static_cast<accscalar_t>(qkv_bias_k))); qkv_v = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v) + static_cast<accscalar_t>(qkv_bias_v))); q_k_v[0][b][nh][t][dh] = qkv_q; q_k_v[1][b][nh][t][dh] = qkv_k; q_k_v[2][b][nh][t][dh] = qkv_v; } } } template <typename scalar_t, typename accscalar_t, bool assume_aligned = false> __global__ void transform_bias_rescale_qkv_add_padding_kernel( // [B, T, 3 * D], but it's a NestedTensor buffer const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv, // [3 * D] const PackedTensorAccessor64<scalar_t, 1, RestrictPtrTraits> qkv_bias, const int* offsets, const int* input_sizes, // [3, B, NH, T, DH] PackedTensorAccessor64<scalar_t, 5, RestrictPtrTraits> q_k_v, const scalar_t inv_sqrt_dim_per_head) { // warp per DH. // so launch B * NH * T warps. 
const auto NH = q_k_v.size(2); const auto T = q_k_v.size(3); const auto DH = q_k_v.size(4); const auto t = blockIdx.x % T; const auto b = blockIdx.x / T; const auto D = NH * DH; const auto _3D = 3 * D; const auto offset_for_batch = offsets[b]; const auto input_dim = 1; const auto* sizes_i = input_sizes + b * input_dim; if (assume_aligned) { constexpr int VEC = TRANSFORM_BIAS_RESCALE_VEC; using LoadT = memory::aligned_vector<scalar_t, VEC>; for (int32_t d_v = threadIdx.x; d_v < D / VEC; d_v += blockDim.x) { auto d = d_v * VEC; auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q[VEC]; scalar_t qkv_bias_k[VEC]; scalar_t qkv_bias_v[VEC]; scalar_t qkv_q[VEC]; scalar_t qkv_k[VEC]; scalar_t qkv_v[VEC]; const auto first_item_offset = t * _3D + d; const auto last_item_offset = first_item_offset + VEC - 1; const bool first_item_in_bounds = first_item_offset < sizes_i[0]; const bool entire_vec_in_bounds = last_item_offset < sizes_i[0]; // Here we require D % VEC == 0 for these vectorized loads. *reinterpret_cast<LoadT*>(&qkv_bias_q) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_k) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_bias_v) = *reinterpret_cast<const LoadT*>(&qkv_bias[d + 2 * D]); if (entire_vec_in_bounds) { const auto offset = offset_for_batch + first_item_offset; *reinterpret_cast<LoadT*>(&qkv_q) = *reinterpret_cast<const LoadT*>(&qkv[offset + 0 * D]); *reinterpret_cast<LoadT*>(&qkv_k) = *reinterpret_cast<const LoadT*>(&qkv[offset + 1 * D]); *reinterpret_cast<LoadT*>(&qkv_v) = *reinterpret_cast<const LoadT*>(&qkv[offset + 2 * D]); #pragma unroll // TODO: specialize for float2half2/half2float2? for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } } else if (first_item_in_bounds) { const auto offset = offset_for_batch + first_item_offset; qkv_q[0] = qkv[offset + 0 * D]; qkv_k[0] = qkv[offset + 1 * D]; qkv_v[0] = qkv[offset + 2 * D]; qkv_q[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[0]) + static_cast<accscalar_t>(qkv_bias_q[0])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[0]) + static_cast<accscalar_t>(qkv_bias_k[0]))); qkv_v[0] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[0]) + static_cast<accscalar_t>(qkv_bias_v[0]))); #pragma unroll for (auto ii = 1; ii < VEC; ++ii) { const auto loop_offset = offset + ii; if (loop_offset < sizes_i[0]) { qkv_q[ii] = qkv[loop_offset + 0 * D]; qkv_k[ii] = qkv[loop_offset + 1 * D]; qkv_v[ii] = qkv[loop_offset + 2 * D]; qkv_q[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q[ii]) + static_cast<accscalar_t>(qkv_bias_q[ii])) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k[ii]) + static_cast<accscalar_t>(qkv_bias_k[ii]))); qkv_v[ii] = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v[ii]) + static_cast<accscalar_t>(qkv_bias_v[ii]))); } else { qkv_q[ii] = 0; qkv_k[ii] = 0; qkv_v[ii] = 0; } } } else { #pragma unroll for (auto ii = 0; ii < VEC; ++ii) { qkv_q[ii] = 0; qkv_k[ii] = 0; qkv_v[ii] = 0; } } // Here 
we require DH % VEC == 0 for these vectorized stores. *reinterpret_cast<LoadT*>(&q_k_v[0][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_q); *reinterpret_cast<LoadT*>(&q_k_v[1][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_k); *reinterpret_cast<LoadT*>(&q_k_v[2][b][nh][t][dh]) = *reinterpret_cast<const LoadT*>(&qkv_v); } } else { for (int32_t d = threadIdx.x; d < D; d += blockDim.x) { auto nh = d / DH; auto dh = d % DH; scalar_t qkv_bias_q = qkv_bias[d + 0 * D]; scalar_t qkv_bias_k = qkv_bias[d + 1 * D]; scalar_t qkv_bias_v = qkv_bias[d + 2 * D]; const auto item_offset = t * _3D + d; const bool in_bounds = item_offset < sizes_i[0]; scalar_t qkv_q, qkv_k, qkv_v; if (in_bounds) { const auto qkv_offset = offset_for_batch + item_offset; qkv_q = qkv[qkv_offset + 0 * D]; qkv_k = qkv[qkv_offset + 1 * D]; qkv_v = qkv[qkv_offset + 2 * D]; qkv_q = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_q) + static_cast<accscalar_t>(qkv_bias_q)) * static_cast<accscalar_t>(inv_sqrt_dim_per_head)); qkv_k = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_k) + static_cast<accscalar_t>(qkv_bias_k))); qkv_v = static_cast<scalar_t>( (static_cast<accscalar_t>(qkv_v) + static_cast<accscalar_t>(qkv_bias_v))); } else { qkv_q = 0; qkv_k = 0; qkv_v = 0; } q_k_v[0][b][nh][t][dh] = qkv_q; q_k_v[1][b][nh][t][dh] = qkv_k; q_k_v[2][b][nh][t][dh] = qkv_v; } } } Tensor collapse_dims_1_and_2(const Tensor& sizes) { auto sizes_dim1 = at::native::narrow_symint(sizes, 1, 0, 1); auto sizes_dim2 = at::native::narrow_symint(sizes, 1, 1, 1); return (sizes_dim1 * sizes_dim2).contiguous(); } } // namespace // compute q = (q + q_bias) / sqrt(dim_per_head), k = k + k_bias, v = v + v_bias __host__ std::tuple<Tensor, Tensor, Tensor> transform_bias_rescale_qkv_cuda( const Tensor& qkv, const Tensor& qkv_bias, const int64_t num_head) { auto B = qkv.is_nested() ? get_nested_tensor_impl(qkv)->get_nested_size_tensor().size(0) : qkv.size(0); // TODO: calculate this without the std::vector -- NestedTensor_to_mask wants // this too auto T = qkv.is_nested() ? NestedTensor_get_max_size(*get_nested_tensor_impl(qkv))[0] : qkv.size(1); if (qkv.is_nested()) { // Don't mess with non-nested case for now since it's not set up to fiddle // with mask size. // Round T up to next multiple of 8 so as to be able to utilize Tensor // cores. Otherwise, sometimes with padding, *no* row will have the maximum // sequence length and so we'll have a non-divisible-by-8 dimension even if // the model author chose a multiple of 8. 
T = T + (8 - (T % 8)) % 8; } auto _3D = qkv_bias.size(0); auto D = _3D / 3; TORCH_CHECK(D % num_head == 0); const auto dim_per_head = D / num_head; auto q_k_v = at::empty({3, B, num_head, T, dim_per_head}, qkv_bias.options()); #define CALL_KERNEL(assume_aligned) \ transform_bias_rescale_qkv_kernel<scalar_t, accscalar_t, assume_aligned> \ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( \ qkv.packed_accessor64<scalar_t, 3, RestrictPtrTraits>(), \ qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \ 1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head))) #define CALL_ADD_PADDING_KERNEL(assume_aligned) \ transform_bias_rescale_qkv_add_padding_kernel< \ scalar_t, \ accscalar_t, \ assume_aligned> \ <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( \ nt_qkv_buffer \ .packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ qkv_bias.packed_accessor64<scalar_t, 1, RestrictPtrTraits>(), \ offsets_ptr, \ sizes_ptr, \ q_k_v.packed_accessor64<scalar_t, 5, RestrictPtrTraits>(), \ 1.0 / std::sqrt(static_cast<scalar_t>(dim_per_head))) AT_DISPATCH_FLOATING_TYPES_AND2( ScalarType::Half, ScalarType::BFloat16, qkv.scalar_type(), "transform_bias_rescale_qkv", [&] { using accscalar_t = acc_type<scalar_t, true>; auto threads = std::max( std::min<int32_t>(1024, D / TRANSFORM_BIAS_RESCALE_VEC), 1); auto blocks = B * T; const bool aligned = ((dim_per_head % TRANSFORM_BIAS_RESCALE_VEC) == 0) && ((reinterpret_cast<intptr_t>(qkv_bias.data_ptr()) % TRANSFORM_BIAS_RESCALE_VEC) == 0); if (aligned) { TORCH_INTERNAL_ASSERT_DEBUG_ONLY( D % TRANSFORM_BIAS_RESCALE_VEC == 0, "D = num_heads * dim_per_head, so we should have dim_per_head % " "TRANSFORM_BIAS_RESCALE_VEC == 0 => " "D % TRANSFORM_BIAS_RESCALE_VEC == 0"); } if (qkv.is_nested()) { auto* nt_qkv = get_nested_tensor_impl(qkv); const at::Tensor& nt_qkv_buffer = nt_qkv->get_buffer(); auto sizes = collapse_dims_1_and_2(nt_qkv->get_nested_size_tensor()); auto offsets = NestedTensor_batch_offsets_from_size_tensor(sizes, sizes.numel()); at::native::narrow_symint(offsets, 0, sizes.numel() + 1, sizes.numel()) .copy_(sizes.reshape({-1})); auto metadata = offsets.to(at::Device(kCUDA), at::kInt, true, true); const auto offsets_ptr = metadata.data_ptr<int>(); const auto sizes_ptr = offsets_ptr + sizes.numel() + 1; const auto input_dim = sizes.sizes()[1]; TORCH_INTERNAL_ASSERT_DEBUG_ONLY(input_dim == 1); if (aligned && ((reinterpret_cast<intptr_t>(qkv.data_ptr()) % TRANSFORM_BIAS_RESCALE_VEC) == 0)) { CALL_ADD_PADDING_KERNEL(true); } else { CALL_ADD_PADDING_KERNEL(false); } } else if (aligned) { CALL_KERNEL(true); } else { CALL_KERNEL(false); } C10_CUDA_KERNEL_LAUNCH_CHECK(); }); #undef CALL_ADD_PADDING_KERNEL #undef CALL_KERNEL auto q_k_v_s = at::native::split(q_k_v.view({3 * B, num_head, T, dim_per_head}), B, 0); return std::make_tuple(q_k_v_s[0], q_k_v_s[1], q_k_v_s[2]); } std::tuple<Tensor, Tensor> native_multi_head_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, const int64_t embed_dim, const int64_t num_head, const Tensor& qkv_weight, const Tensor& qkv_bias, const Tensor& proj_weight, const Tensor& proj_bias, const c10::optional<Tensor>& mask, bool need_weights, bool average_attn_weights, const c10::optional<int64_t> mask_type) { // query shape: [B, T, D] // qkv_weight shape: [3 * D, D] TORCH_CHECK( !mask || !query.is_nested(), "NestedTensor with mask is not supported yet"); const auto D = embed_dim; TORCH_CHECK( query.dim() == 3, "expected 3-D `query`, got ", 
query.dim(), "-D tensor"); TORCH_CHECK( query.is_nested() || query.sizes()[2] == embed_dim, "passed-in embed_dim ", embed_dim, " didn't match last dim of query ", query.sizes()[2]); TORCH_CHECK( key.dim() == 3, "expected 3-D `key`, got ", key.dim(), "-D tensor"); TORCH_CHECK( value.dim() == 3, "expected 3-D `value`, got ", value.dim(), "-D tensor"); TORCH_CHECK( query.is_nested() || key.is_nested() || value.is_nested() || (query.sizes() == key.sizes() && key.sizes() == value.sizes()), "expected `query`/`key`/`value` shapes to match"); TORCH_CHECK( qkv_weight.dim() == 2, "expected 2-D `qkv_weight`, got ", qkv_weight.dim(), "-D tensor"); TORCH_CHECK( D * 3 == qkv_weight.sizes()[0], "expected `qkv_weight` first dim to be 3x embed_dim"); TORCH_CHECK( D == qkv_weight.sizes()[1], "expected `qkv_weight` second dim to be embed_Dim"); TORCH_CHECK( qkv_bias.dim() == 1, "expected 2-D `qkv_bias`, got ", qkv_bias.dim(), "-D tensor"); TORCH_CHECK( qkv_bias.sizes()[0] == 3 * D, "expected `qkv_bias` first dim and first dim of query to be equal"); TORCH_CHECK(D % num_head == 0, "`embed_dim` must divide evenly by `num_heads`"); #ifndef NDEBUG const auto B = query.is_nested() ? get_nested_tensor_impl(query)->get_nested_size_tensor().size(0) : query.sizes()[0]; auto T = query.is_nested() ? 0 : query.sizes()[1]; #endif const auto dim_per_head = D / num_head; if ((query.is_same(key) && key.is_same(value)) && dim_per_head % 8 == 0 && !need_weights) { // We have not done linear projection yet but the input for SDP // Is expected to be 4 dimensional. We "cheaply" create view tensors // That will then be used for checking hot path conditions with select_sd_backend auto q = query.view({query.size(0), -1, num_head, dim_per_head}).transpose(1, 2); auto k = key.view({key.size(0), -1, num_head, dim_per_head}).transpose(1, 2); auto v = value.view({value.size(0), -1, num_head, dim_per_head}).transpose(1, 2); sdp::sdp_params kernel_params{q, k, v, mask.has_value(), 0.0, false}; auto backend = select_sdp_backend(kernel_params); if (backend == sdp::SDPBackend::flash_attention || backend == sdp::SDPBackend::efficient_attention) { auto x = at::linear(query, qkv_weight, qkv_bias); auto chunks = x.chunk(3, -1); auto x_size_0 = x.size(0); chunks[0] = (chunks[0].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); chunks[1] = (chunks[1].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); chunks[2] = (chunks[2].view({x_size_0, -1, num_head, dim_per_head})) .transpose(1, 2); auto y = at::scaled_dot_product_attention( chunks[0], chunks[1], chunks[2], mask, 0.0, false); auto past_sdp = y.transpose(1, 2).reshape({x_size_0, -1, embed_dim}); return std::make_tuple( at::linear(past_sdp, proj_weight, proj_bias), Tensor()); } // Returned math or error lets not use it } // shape: [B, T, 3 x D] auto qkv = qkv_projection(query, key, value, embed_dim, qkv_weight); if (!qkv.is_nested() && qkv.numel() == 0) { if (query.is_nested()) { return std::make_tuple(Tensor(), Tensor()); } return std::make_tuple(at::empty_like(query), Tensor()); } #ifndef NDEBUG if (!query.is_nested() || !qkv.is_nested()) { if (query.is_nested()) { T = qkv.size(1); } debug_assert_shape(__LINE__, qkv, {B, T, 3 * D}); } #endif #ifdef DEBUG_PRINT_EACH_STEP if (!qkv.is_nested()) { std::cerr << "qkv: " << qkv << std::endl; } #endif // shape: 3 x [B, num_head, T, dim_per_head] auto q_k_v = _transform_bias_rescale_qkv(qkv, qkv_bias, num_head); qkv = Tensor(); // Not used any more, allow free auto& q = std::get<0>(q_k_v); const auto& k = std::get<1>(q_k_v); 
const auto& v = std::get<2>(q_k_v); #ifndef NDEBUG debug_assert_shape(__LINE__, q, {B, num_head, T, dim_per_head}); debug_assert_shape(__LINE__, k, {B, num_head, T, dim_per_head}); debug_assert_shape(__LINE__, v, {B, num_head, T, dim_per_head}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "q: " << q << std::endl; std::cerr << "k: " << k << std::endl; std::cerr << "v: " << v << std::endl; #endif // shape: [B, num_head, T, T] auto qkt = bmm_nt(q, k); // q & k are dead but cannot be freed because they were packed with v #ifndef NDEBUG debug_assert_shape(__LINE__, qkt, {B, num_head, T, T}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "qkt: " << qkt << std::endl; #endif // shape: [B, num_head, T, T] // TODO: long-term, have a kernel that works with // NestedTensor directly if there is no mask passed qkt = masked_softmax(qkt, mask, query, mask_type); #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "qkt after softmax: " << qkt << std::endl; #endif // shape: [B, num_head, T, dim_per_head] // reuse storage for q; we're done with it auto attn_ctx = bmm_nn(q, qkt, v); // qkv is not dead; we just reused storage for q! if (!need_weights) { qkt = Tensor(); } #ifndef NDEBUG debug_assert_shape(__LINE__, attn_ctx, {B, num_head, T, dim_per_head}); #endif #ifdef DEBUG_PRINT_EACH_STEP std::cerr << "attn_ctx: " << attn_ctx << std::endl; #endif // shape: [B, T, D] // Fuse transform_0213 inside auto proj = transform0213_gemm_nt_bias( attn_ctx, proj_weight, proj_bias, query); #ifndef NDEBUG debug_assert_shape(__LINE__, proj, {B, T, D}); #endif if (need_weights && average_attn_weights) { // weights are not needed for full transformer, so don't worry too // much about performance -- we implement this just to make use // cases that don't disable need_weights still get some speedup. 
qkt = qkt.sum(1); qkt /= num_head; } return std::make_tuple(std::move(proj), std::move(qkt)); } std::tuple<Tensor, Tensor, Tensor, Tensor, int64_t, int64_t, int64_t, int64_t, Tensor> _scaled_dot_product_flash_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, double dropout_p, bool is_causal, bool return_debug_mask) { // Used for tracking usage statistics C10_LOG_API_USAGE_ONCE("torch.sdpa.flash_attention"); // Query (Batch x Num_heads x Q_seq_len x Dim_per_head) // Key (Batch x Num_heads x KV_seq_len x Dim_per_head) // Value (Batch x Num_heads x KV_seq_len x Dim_per_head) const int64_t batch_size = query.size(0); const int64_t num_heads = query.size(1); const int64_t max_seqlen_batch_q = query.size(2); const int64_t head_dim = query.size(3); const int64_t max_seqlen_batch_k = key.size(2); const int64_t max_seqlen_batch_v = value.size(2); TORCH_CHECK( max_seqlen_batch_k == max_seqlen_batch_v, "Key and Value must have the same sequence length"); // Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head) // Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head) // Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head) Tensor q_t = query.transpose(1, 2); Tensor k_t = key.transpose(1, 2); Tensor v_t = value.transpose(1, 2); Tensor cumulative_sequence_length_q = at::arange( 0, (batch_size + 1) * max_seqlen_batch_q, max_seqlen_batch_q, TensorOptions().device(at::kCUDA).dtype(at::kInt)); Tensor cumulative_sequence_length_k = at::arange( 0, (batch_size + 1) * max_seqlen_batch_k, max_seqlen_batch_k, TensorOptions().device(at::kCUDA).dtype(at::kInt)); int64_t Nnz_q{batch_size * max_seqlen_batch_q}; int64_t Nnz_kv{batch_size * max_seqlen_batch_k}; // For the standard MHA these will actually be views Tensor query_reshaped = q_t.reshape({Nnz_q, num_heads, head_dim}); Tensor key_reshaped = k_t.reshape({Nnz_kv, num_heads, head_dim}); Tensor value_reshaped = v_t.reshape({Nnz_kv, num_heads, head_dim}); Tensor attention, log_sumexp, debug_attn_mask; int64_t philox_seed{0}, philox_offset{0}; std::tie(attention, log_sumexp, philox_seed, philox_offset, debug_attn_mask) = at::_flash_attention_forward( query_reshaped, key_reshaped, value_reshaped, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, dropout_p, is_causal, return_debug_mask); // Reshape output to convert nnz to batch_size and seq_len attention = attention.view({batch_size, max_seqlen_batch_q, num_heads, head_dim}).transpose(1,2); return std::make_tuple(attention, log_sumexp, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, philox_seed, philox_offset, debug_attn_mask); } std::tuple<Tensor, Tensor> _scaled_dot_product_efficient_attention_cuda( const Tensor& query, const Tensor& key, const Tensor& value, bool compute_log_sumexp, bool is_causal) { // Used for tracking usage statistics C10_LOG_API_USAGE_ONCE("torch.sdpa.mem_efficient_attention"); // Query -> Query(Batch x Q_seq_len x Num_heads x Dim_per_head) // Key -> Key(Batch x KV_seq_len x Num_heads x Dim_per_head) // Value -> Value(Batch x KV_seq_len x Num_heads x Dim_per_head) Tensor q_t = query.transpose(1, 2); Tensor k_t = key.transpose(1, 2); Tensor v_t = value.transpose(1, 2); Tensor attention, log_sumexp; std::tie(attention, log_sumexp) = at::_efficient_attention_forward( q_t, k_t, v_t, c10::nullopt, c10::nullopt, c10::nullopt, compute_log_sumexp, is_causal); attention = attention.transpose(1,2); return std::make_tuple(std::move(attention), 
std::move(log_sumexp)); } int64_t _fused_sdp_choice_cuda(const Tensor& query_, const Tensor& key, const Tensor& value, const c10::optional<Tensor>& attn_mask_, double dropout_p, bool is_causal){ sdp::sdp_params kernel_params{query_, key, value, attn_mask_.has_value(), dropout_p, is_causal}; auto backend = select_sdp_backend(kernel_params); if (backend == sdp::SDPBackend::error) { TORCH_CHECK( false, "No viable backend for scaled_dot_product_attention was found. ", "This is likely due to turning off both the math kernel and the fused kernels."); } return static_cast<int64_t>(backend); } bool _chunk_grad_outputs_efficient_attention( const Tensor& query, const Tensor& key, const Tensor& value, bool is_causal) { int64_t M = query.size(2); int64_t N = key.size(2); bool grad_kv_needs_init = is_causal && N > M; bool is_aliased = query.storage().is_alias_of(key.storage()) && query.storage().is_alias_of(value.storage()); bool equal_seq_len = query.size(2) == key.size(2); bool q_v_same_head_dim = query.size(3) == value.size(3); bool chunk_grad_outputs = (!grad_kv_needs_init && equal_seq_len && q_v_same_head_dim && is_aliased); return chunk_grad_outputs; } std::tuple<Tensor, Tensor, int64_t, int64_t, Tensor> _flash_attention_forward( const Tensor& query, const Tensor& key, const Tensor& value, const Tensor& cumulative_sequence_length_q, const Tensor& cumulative_sequence_length_k, const int64_t max_seqlen_batch_q, const int64_t max_seqlen_batch_k, double dropout_p, bool is_causal, bool return_debug_mask) { #if defined(USE_FLASH_ATTENTION) /* num_splits determines how much to parallelize over the seqlen_q dimension num_splits=0 means it will be set by an internal heuristic. We're exposing num_splits mostly for benchmarking. We will hard code it to 0 for now */ constexpr int num_splits{0}; auto softmax_scale = std::pow(query.size(-1), -0.5); at::Tensor output = at::empty_like(query); Tensor logsumexp, debug_attn_mask; uint64_t philox_seed{0}, philox_offset{0}; std::tie(logsumexp, philox_seed, philox_offset, debug_attn_mask) = fmha::mha_fwd( query, key, value, output, cumulative_sequence_length_q, cumulative_sequence_length_k, max_seqlen_batch_q, max_seqlen_batch_k, dropout_p, softmax_scale, false, /*zero_tensors = false for all calls here*/ is_causal, return_debug_mask, /*return_softmax (this is used for testing)*/ num_splits); debug_attn_mask = return_debug_mask ? 
debug_attn_mask : at::empty({0}, query.options()); int64_t signed_philox_seed = sdp::bit_cast<int64_t>(philox_seed); int64_t signed_philox_offset= sdp::bit_cast<int64_t>(philox_offset); return std::make_tuple(output, logsumexp, signed_philox_seed, signed_philox_offset, debug_attn_mask); #endif TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.") return std::make_tuple(Tensor(), Tensor(), 0, 0, Tensor()); } std::tuple<at::Tensor, at::Tensor> _efficient_attention_forward( const at::Tensor& query, // [b, seqlen, num_heads, K] const at::Tensor& key, // [b, seqlen, num_heads, K] const at::Tensor& value, // [b, seqlen, num_heads, Kv] // (Mode 1MHK only) [b+1]: cu_seqlens_q[b] contains the // position of the first query token for batch $b const c10::optional<at::Tensor>& cu_seqlens_q, // (Mode 1MHK only) [b+1]: cu_seqlens_k[b] contains the // position of the first key token for batch $b const c10::optional<at::Tensor>& cu_seqlens_k, // (Mode 1MHK only) Maximum sequence length across batches const c10::optional<int64_t> max_seqlen_q_, bool compute_logsumexp, bool causal) { #if defined(USE_FLASH_ATTENTION) // TODO In theory it is possible to compile with _CUDA_ARCH < 5.0 and run on a // machine that is >= 5.0. In practice, this is not a problem but since // this would avoid runtime architecture checks, we should look into it TORCH_CHECK(query.dim() == 4); TORCH_CHECK(key.dim() == 4); TORCH_CHECK(value.dim() == 4); // Batch sizes TORCH_CHECK(query.size(0) == key.size(0)); TORCH_CHECK(query.size(0) == value.size(0)); // Sequence length TORCH_CHECK(key.size(1) == value.size(1)); // Num heads TORCH_CHECK(query.size(2) == key.size(2)); TORCH_CHECK(query.size(2) == value.size(2)); // Embedding per head TORCH_CHECK(query.size(3) == key.size(3)); int64_t max_seqlen_q = 0, max_seqlen_k=0; TORCH_CHECK(cu_seqlens_q.has_value() == cu_seqlens_k.has_value()); if (cu_seqlens_q.has_value()) { TORCH_CHECK(cu_seqlens_q->scalar_type() == at::ScalarType::Int); TORCH_CHECK(cu_seqlens_k->scalar_type() == at::ScalarType::Int); TORCH_CHECK(cu_seqlens_q->dim() == 1 && cu_seqlens_k->dim() == 1); CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_q)); CHECK_NOSPARSE_CONTIGUOUS_CUDA((*cu_seqlens_k)); TORCH_CHECK(cu_seqlens_q->size(0) == cu_seqlens_k->size(0)); TORCH_CHECK(query.size(0) == 1, "cu_seqlen only supports batch_size=1"); TORCH_CHECK(max_seqlen_q_.has_value()); max_seqlen_q = *max_seqlen_q_; max_seqlen_k = 0; // Will be set inside the kernel } else { max_seqlen_q = query.size(1); max_seqlen_k = key.size(1); } CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(query); CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(key); CHECK_NOSPARSE_LASTCONTIGUOUS_CUDA(value); at::cuda::CUDAGuard device_guard(query.device()); cudaStream_t stream = at::cuda::getCurrentCUDAStream(); int64_t B = query.size(0); int64_t M = query.size(1); int64_t N = key.size(1); int64_t num_heads = query.size(-2); int64_t K = query.size(-1); int64_t Kv = value.size(-1); at::Tensor res; at::Tensor logsumexp; auto launchKernel = [&](auto _k, int computeCapability) { using Kernel = decltype(_k); using scalar_t = typename Kernel::scalar_t; (void)_k; res = at::empty( {B, M, num_heads, Kv}, query.options().dtype( TypeTraits<typename Kernel::output_t>::atScalarType())); // NOTE: Should be aligned (by padding) in case M is // not a good number for loading during backward constexpr decltype(M) kAlignLSE = Kernel::kAlignLSE; logsumexp = at::empty( {B, num_heads, compute_logsumexp ? 
ceil_div(max_seqlen_q, kAlignLSE) * kAlignLSE : 0}, query.options().dtype(at::ScalarType::Float)); typename Kernel::Params p; p.query_ptr = (scalar_t*)query.data_ptr(); p.key_ptr = (scalar_t*)key.data_ptr(); p.value_ptr = (scalar_t*)value.data_ptr(); p.logsumexp_ptr = compute_logsumexp ? (typename Kernel::lse_scalar_t*)logsumexp.data_ptr() : nullptr; at::Tensor output_accum; if (Kernel::kNeedsOutputAccumulatorBuffer) { output_accum = at::empty( {B, M, num_heads, Kv}, query.options().dtype( TypeTraits<typename Kernel::output_accum_t>::atScalarType())); p.output_accum_ptr = (typename Kernel::output_accum_t*)output_accum.data_ptr(); } else { p.output_accum_ptr = nullptr; } p.output_ptr = (typename Kernel::output_t*)res.data_ptr(); if (cu_seqlens_q.has_value()) { p.cu_seqlens_q_ptr = (int32_t*)cu_seqlens_q->data_ptr(); p.cu_seqlens_k_ptr = (int32_t*)cu_seqlens_k->data_ptr(); } #define ASSIGN_CHECK_OVERFLOW(A, B) \ { \ A = B; \ TORCH_CHECK(B < std::numeric_limits<decltype(A)>::max(), #B " overflows"); \ } p.num_heads = num_heads; p.head_dim = query.size(3); p.head_dim_value = value.size(3); p.num_queries = max_seqlen_q; p.num_keys = max_seqlen_k; p.num_batches = cu_seqlens_q.has_value() ? cu_seqlens_q->size(0) - 1 : B; p.causal = causal; ASSIGN_CHECK_OVERFLOW(p.q_strideB, query.stride(0)); ASSIGN_CHECK_OVERFLOW(p.k_strideB, key.stride(0)); ASSIGN_CHECK_OVERFLOW(p.v_strideB, value.stride(0)); ASSIGN_CHECK_OVERFLOW(p.q_strideM, query.stride(1)); ASSIGN_CHECK_OVERFLOW(p.k_strideM, key.stride(1)); ASSIGN_CHECK_OVERFLOW(p.v_strideM, value.stride(1)); ASSIGN_CHECK_OVERFLOW(p.q_strideH, query.stride(2)); ASSIGN_CHECK_OVERFLOW(p.k_strideH, key.stride(2)); ASSIGN_CHECK_OVERFLOW(p.v_strideH, value.stride(2)); constexpr auto kernel_fn = attention_kernel_batched<Kernel>; size_t smem_bytes = sizeof(typename Kernel::SharedStorage); if (smem_bytes > 0xc000) { TORCH_INTERNAL_ASSERT( computeCapability >= 70, "This kernel requires too much shared memory on this machine!"); AT_CUDA_CHECK(cudaFuncSetAttribute( kernel_fn, cudaFuncAttributeMaxDynamicSharedMemorySize, smem_bytes)); } Kernel::check_supported(p); kernel_fn<<<p.getBlocksGrid(), p.getThreadsGrid(), smem_bytes, stream>>>(p); }; // Dispatch to the right kernel DISPATCH_KERNEL(query, key, value, ([&]() { launchKernel(Kernel{}, computeCapability); })); AT_CUDA_CHECK(cudaGetLastError()); return std::make_tuple(res, logsumexp); #endif TORCH_CHECK(false, "USE_FLASH_ATTENTION was not enabled for build.") return std::make_tuple(Tensor{}, Tensor{}); } Tensor triton_scaled_dot_attention(const Tensor& q, const Tensor& k, const Tensor& v, double dropout_p){ TORCH_CHECK(false, "This operator should be overridden in python before use"); return at::Tensor(); } REGISTER_CUDA_DISPATCH(_fused_sdp_choice_stub, &_fused_sdp_choice_cuda); } // namespace native } // namespace at
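The dense flash-attention path above synthesizes cumulative sequence lengths with at::arange(0, (B+1)*S, S) and views the [B, S, H, D] tensors as [B*S, H, D] before calling _flash_attention_forward. The small host-only C++ sketch below reproduces that bookkeeping with plain loops; the variable names are illustrative and no ATen calls are involved.

// Host-side sketch of the cu_seqlens / flattening bookkeeping used by the
// dense scaled_dot_product_flash_attention path above. Plain C++, not ATen.
#include <cstdint>
#include <cstdio>
#include <vector>

int main() {
  const int64_t batch_size = 3, num_heads = 8, seqlen_q = 128, head_dim = 64;

  // cumulative_sequence_length_q = arange(0, (B+1)*S, S) = {0, S, 2S, ..., B*S}.
  std::vector<int32_t> cu_seqlens_q;
  for (int64_t i = 0; i <= batch_size; ++i) {
    cu_seqlens_q.push_back(static_cast<int32_t>(i * seqlen_q));
  }

  // Nnz_q = B * S; the [B, S, H, D] tensor is viewed as [Nnz_q, H, D], so the
  // token at (b, s) lands at flat row cu_seqlens_q[b] + s.
  const int64_t Nnz_q = batch_size * seqlen_q;
  const int64_t b = 2, s = 5;
  const int64_t flat_row = cu_seqlens_q[b] + s;

  std::printf("cu_seqlens_q = ");
  for (int32_t v : cu_seqlens_q) std::printf("%d ", v);
  std::printf("\nNnz_q = %lld, (b=%lld, s=%lld) -> flat row %lld of %lld\n",
              (long long)Nnz_q, (long long)b, (long long)s,
              (long long)flat_row, (long long)Nnz_q);
  (void)num_heads; (void)head_dim;
  return 0;
}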
bd534823bb6207eae74c88d6bf74bffa4c6dd9b3.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "NvInfer.h" #include "logger.h" #include "pluginKernels.h" #include "pluginUtil.h" #include "skipLayerNormPlugin.h" #include "common.h" #include <cassert> #include <cstring> #include <vector> using namespace nvinfer1; using bert::operator+; namespace bert { namespace test { template <typename T, unsigned TPB> __global__ void skipLayerNormKernelSmall( const int ld, const T* input, const T* skip, const float* beta, const float* gamma, T* output) { const T rld = T(1) / T(ld); const int offset = blockIdx.x * ld; hipcub::Sum pairSum; // reduce x and x^2 kvp<T> threadData(0, 0); const int idx = offset + threadIdx.x; T val = 0; if (threadIdx.x < ld) { val = input[idx] + skip[idx]; const T rldval = rld * val; threadData = pairSum(threadData, kvp<T>(rldval, rldval * val)); } layerNormSmall<T, TPB>(val, threadData, ld, idx, beta, gamma, output); } template <typename T, unsigned TPB> __global__ void skipLayerNormKernel( const int ld, const T* input, const T* skip, const float* beta, const float* gamma, T* output) { const T rld = T(1) / T(ld); const int offset = blockIdx.x * ld; hipcub::Sum pairSum; // reduce x and x^2 kvp<T> threadData(0, 0); for (int i = threadIdx.x; i < ld; i += TPB) { const int idx = offset + i; const T val = input[idx] + skip[idx]; const T rldval = rld * val; threadData = pairSum(threadData, kvp<T>(rldval, rldval * val)); output[idx] = val; } layerNorm<T, TPB>(threadData, ld, offset, beta, gamma, output); } template <typename T> int computeSkipLayerNorm(hipStream_t stream, const int ld, const int n, const T* input, const T* skip, const float* beta, const float* gamma, T* output) { // this must be true because n is the total size of the tensor assert(n % ld == 0); const int gridSize = n / ld; if (ld <= 32) { constexpr int blockSize = 32; hipLaunchKernelGGL(( skipLayerNormKernelSmall<T, blockSize>) , dim3(gridSize), dim3(blockSize), 0, stream, ld, input, skip, beta, gamma, output); } else if (ld <= 128) { constexpr int blockSize = 128; hipLaunchKernelGGL(( skipLayerNormKernelSmall<T, blockSize>) , dim3(gridSize), dim3(blockSize), 0, stream, ld, input, skip, beta, gamma, output); } else if (ld == 384) { constexpr int blockSize = 384; hipLaunchKernelGGL(( skipLayerNormKernelSmall<T, blockSize>) , dim3(gridSize), dim3(blockSize), 0, stream, ld, input, skip, beta, gamma, output); } else { constexpr int blockSize = 256; hipLaunchKernelGGL(( skipLayerNormKernel<T, blockSize>), dim3(gridSize), dim3(blockSize), 0, stream, ld, input, skip, beta, gamma, output); } CHECK(hipPeekAtLastError()); return 0; } // Clip plugin specific constants namespace { static const char* SKIP_LAYER_NORM_VERSION{"1"}; static const char* SKIP_LAYER_NORM_NAME{"CustomSkipLayerNormPluginDynamic"}; } // namespace // Static class fields initialization thread_local PluginFieldCollection 
SkipLayerNormPluginDynamicCreator::mFC{}; thread_local std::vector<PluginField> SkipLayerNormPluginDynamicCreator::mPluginAttributes; REGISTER_TENSORRT_PLUGIN(SkipLayerNormPluginDynamicCreator); SkipLayerNormPluginDynamic::SkipLayerNormPluginDynamic( const std::string name, const int ld, const Weights& beta, const Weights& gamma) : mLayerName(name) , mLd(ld) , mGamma(gamma) , mBeta(beta) { } SkipLayerNormPluginDynamic::SkipLayerNormPluginDynamic(const std::string name, const void* data, size_t length) : mLayerName(name) { gLogVerbose << "Skip LN Deser start\n"; // Deserialize in the same order as serialization const char* d = static_cast<const char*>(data); const char* a = d; DESER(d, mType); DESER(d, mLd); mBetaDev = deserToDev<float>(d, mLd); mGammaDev = deserToDev<float>(d, mLd); assert(d == (a + length)); // this signals init not to allocate/copy mGamma.count = mLd; mGamma.values = nullptr; mBeta.count = mLd; mBeta.values = nullptr; gLogVerbose << "Skip LN Deser done\n"; } // IPluginV2DynamicExt Methods IPluginV2DynamicExt* SkipLayerNormPluginDynamic::clone() const { return new SkipLayerNormPluginDynamic(mLayerName, mLd, mBeta, mGamma); } DimsExprs SkipLayerNormPluginDynamic::getOutputDimensions(int outputIndex, const DimsExprs* inputs, int nbInputs, IExprBuilder& exprBuilder) { assert(nbInputs == 2); assert(outputIndex == 0); assert(inputs[0].nbDims == inputs[1].nbDims); return inputs[0]; } bool SkipLayerNormPluginDynamic::supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) { assert(nbInputs == 2); assert(nbOutputs == 1); const PluginTensorDesc& in = inOut[pos]; if (pos == 0) { return (in.type == DataType::kFLOAT || in.type == DataType::kHALF) && (in.format == TensorFormat::kLINEAR); } const PluginTensorDesc& prev = inOut[pos - 1]; if (pos == 1) { return in.type == prev.type && in.format == prev.format; } // output return in.type == prev.type && in.format == prev.format; } void SkipLayerNormPluginDynamic::configurePlugin(const DynamicPluginTensorDesc* inputs, int nbInputs, const DynamicPluginTensorDesc* outputs, int nbOutputs) { // Validate input arguments assert(nbOutputs == 1); assert(nbInputs == 2); mType = inputs[0].desc.type; assert(mType == inputs[1].desc.type); const auto& inDims0 = inputs[0].desc.dims; const auto& inDims1 = inputs[1].desc.dims; assert(inDims0.nbDims == inDims1.nbDims); assert(std::equal(inDims0.d, inDims0.d + inDims0.nbDims, inDims1.d)); assert(inDims0.nbDims== 5); mLd = inDims0.d[2]; // hiddensize assert(inDims0.d[3] == 1); assert(inDims0.d[4] == 1); } size_t SkipLayerNormPluginDynamic::getWorkspaceSize(const PluginTensorDesc* inputs, int nbInputs, const PluginTensorDesc* outputs, int nbOutputs) const { return 0; } int SkipLayerNormPluginDynamic::enqueue(const PluginTensorDesc* inputDesc, const PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, hipStream_t stream) { const int inputVolume = samplesCommon::volume(inputDesc[0].dims); int status = -1; // Our plugin outputs only one tensor // Launch CUDA kernel wrapper and save its return value if (mType == DataType::kFLOAT) { const float* input = static_cast<const float*>(inputs[0]); const float* skip = static_cast<const float*>(inputs[1]); float* output = static_cast<float*>(outputs[0]); status = computeSkipLayerNorm<float>(stream, mLd, inputVolume, input, skip, mBetaDev, mGammaDev, output); } else if (mType == DataType::kHALF) { const half* input = static_cast<const half*>(inputs[0]); const half* skip = static_cast<const 
half*>(inputs[1]); half* output = static_cast<half*>(outputs[0]); status = computeSkipLayerNorm<half>(stream, mLd, inputVolume, input, skip, mBetaDev, mGammaDev, output); } else { gLogError << "Unsupported Type\n"; assert(false); } return status; } // IPluginV2Ext Methods DataType SkipLayerNormPluginDynamic::getOutputDataType(int index, const DataType* inputTypes, int nbInputs) const { assert(index == 0); assert(nbInputs == 2); assert(inputTypes[0] == DataType::kFLOAT || inputTypes[0] == DataType::kHALF); assert(inputTypes[0] == inputTypes[1]); return inputTypes[0]; } // IPluginV2 Methods const char* SkipLayerNormPluginDynamic::getPluginType() const { return SKIP_LAYER_NORM_NAME; } const char* SkipLayerNormPluginDynamic::getPluginVersion() const { return SKIP_LAYER_NORM_VERSION; } int SkipLayerNormPluginDynamic::getNbOutputs() const { return 1; } int SkipLayerNormPluginDynamic::initialize() { if (mGamma.values) { CHECK(hipMalloc(&mGammaDev, sizeof(float) * mGamma.count)); CHECK(hipMemcpy(mGammaDev, mGamma.values, sizeof(float) * mGamma.count, hipMemcpyHostToDevice)); } if (mBeta.values) { CHECK(hipMalloc(&mBetaDev, sizeof(float) * mBeta.count)); CHECK(hipMemcpy(mBetaDev, mBeta.values, sizeof(float) * mGamma.count, hipMemcpyHostToDevice)); } return 0; } void SkipLayerNormPluginDynamic::terminate() { gLogVerbose << "SKIPLN terminate start" << std::endl; hipFree(mGammaDev); hipFree(mBetaDev); gLogVerbose << "SKIPLN terminate done" << std::endl; } size_t SkipLayerNormPluginDynamic::getSerializationSize() const { return 2 * sizeof(float) * mLd + sizeof(DataType) + sizeof(mLd) ; } void SkipLayerNormPluginDynamic::serialize(void* buffer) const { char* d = static_cast<char*>(buffer); const char* a = d; writeToBuffer(d, mType); writeToBuffer(d, mLd); serFromDev(d, mBetaDev, mLd); serFromDev(d, mGammaDev, mLd); assert(d == a + getSerializationSize()); } void SkipLayerNormPluginDynamic::destroy() { // This gets called when the network containing plugin is destroyed delete this; } void SkipLayerNormPluginDynamic::setPluginNamespace(const char* libNamespace) { mNamespace = libNamespace; } const char* SkipLayerNormPluginDynamic::getPluginNamespace() const { return mNamespace.c_str(); } ///////////////////////////////////////////////////////// SkipLayerNormPluginDynamicCreator::SkipLayerNormPluginDynamicCreator() { mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* SkipLayerNormPluginDynamicCreator::getPluginName() const { return SKIP_LAYER_NORM_NAME; } const char* SkipLayerNormPluginDynamicCreator::getPluginVersion() const { return SKIP_LAYER_NORM_VERSION; } const PluginFieldCollection* SkipLayerNormPluginDynamicCreator::getFieldNames() { return &mFC; } IPluginV2* SkipLayerNormPluginDynamicCreator::createPlugin(const char* name, const PluginFieldCollection* fc) { gLogVerbose << "Creating SkipLayerNormPluginDynamicCreator...\n"; int ld; Weights beta; Weights gamma; for (int i = 0; i < fc->nbFields; i++) { std::string field_name(fc->fields[i].name); if (field_name.compare("ld") == 0) { ld = *static_cast<const int*>(fc->fields[i].data); gLogVerbose << "Building ld: " << ld << std::endl; } if (field_name.compare("beta") == 0) { gLogVerbose << "Building beta...\n"; beta.values = fc->fields[i].data; beta.count = fc->fields[i].length; beta.type = static_cast<DataType>(fc->fields[i].type); } if (field_name.compare("gamma") == 0) { gLogVerbose << "Building gamma...\n"; gamma.values = fc->fields[i].data; gamma.count = fc->fields[i].length; gamma.type = 
static_cast<DataType>(fc->fields[i].type); } } SkipLayerNormPluginDynamic* p = new SkipLayerNormPluginDynamic(name, ld, beta, gamma); return p; } IPluginV2* SkipLayerNormPluginDynamicCreator::deserializePlugin(const char* name, const void* serialData, size_t serialLength) { // This object will be deleted when the network is destroyed, which will // call SkipLayerNormPluginDynamic::destroy() return new SkipLayerNormPluginDynamic(name, serialData, serialLength); } void SkipLayerNormPluginDynamicCreator::setPluginNamespace(const char* libNamespace) { mNamespace = libNamespace; } const char* SkipLayerNormPluginDynamicCreator::getPluginNamespace() const { return mNamespace.c_str(); } } }
bd534823bb6207eae74c88d6bf74bffa4c6dd9b3.cu
/* * Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include "NvInfer.h" #include "logger.h" #include "pluginKernels.h" #include "pluginUtil.h" #include "skipLayerNormPlugin.h" #include "common.h" #include <cassert> #include <cstring> #include <vector> using namespace nvinfer1; using bert::operator+; namespace bert { namespace test { template <typename T, unsigned TPB> __global__ void skipLayerNormKernelSmall( const int ld, const T* input, const T* skip, const float* beta, const float* gamma, T* output) { const T rld = T(1) / T(ld); const int offset = blockIdx.x * ld; cub::Sum pairSum; // reduce x and x^2 kvp<T> threadData(0, 0); const int idx = offset + threadIdx.x; T val = 0; if (threadIdx.x < ld) { val = input[idx] + skip[idx]; const T rldval = rld * val; threadData = pairSum(threadData, kvp<T>(rldval, rldval * val)); } layerNormSmall<T, TPB>(val, threadData, ld, idx, beta, gamma, output); } template <typename T, unsigned TPB> __global__ void skipLayerNormKernel( const int ld, const T* input, const T* skip, const float* beta, const float* gamma, T* output) { const T rld = T(1) / T(ld); const int offset = blockIdx.x * ld; cub::Sum pairSum; // reduce x and x^2 kvp<T> threadData(0, 0); for (int i = threadIdx.x; i < ld; i += TPB) { const int idx = offset + i; const T val = input[idx] + skip[idx]; const T rldval = rld * val; threadData = pairSum(threadData, kvp<T>(rldval, rldval * val)); output[idx] = val; } layerNorm<T, TPB>(threadData, ld, offset, beta, gamma, output); } template <typename T> int computeSkipLayerNorm(cudaStream_t stream, const int ld, const int n, const T* input, const T* skip, const float* beta, const float* gamma, T* output) { // this must be true because n is the total size of the tensor assert(n % ld == 0); const int gridSize = n / ld; if (ld <= 32) { constexpr int blockSize = 32; skipLayerNormKernelSmall<T, blockSize> <<<gridSize, blockSize, 0, stream>>>(ld, input, skip, beta, gamma, output); } else if (ld <= 128) { constexpr int blockSize = 128; skipLayerNormKernelSmall<T, blockSize> <<<gridSize, blockSize, 0, stream>>>(ld, input, skip, beta, gamma, output); } else if (ld == 384) { constexpr int blockSize = 384; skipLayerNormKernelSmall<T, blockSize> <<<gridSize, blockSize, 0, stream>>>(ld, input, skip, beta, gamma, output); } else { constexpr int blockSize = 256; skipLayerNormKernel<T, blockSize><<<gridSize, blockSize, 0, stream>>>(ld, input, skip, beta, gamma, output); } CHECK(cudaPeekAtLastError()); return 0; } // Clip plugin specific constants namespace { static const char* SKIP_LAYER_NORM_VERSION{"1"}; static const char* SKIP_LAYER_NORM_NAME{"CustomSkipLayerNormPluginDynamic"}; } // namespace // Static class fields initialization thread_local PluginFieldCollection SkipLayerNormPluginDynamicCreator::mFC{}; thread_local std::vector<PluginField> SkipLayerNormPluginDynamicCreator::mPluginAttributes; REGISTER_TENSORRT_PLUGIN(SkipLayerNormPluginDynamicCreator); SkipLayerNormPluginDynamic::SkipLayerNormPluginDynamic( 
const std::string name, const int ld, const Weights& beta, const Weights& gamma) : mLayerName(name) , mLd(ld) , mGamma(gamma) , mBeta(beta) { } SkipLayerNormPluginDynamic::SkipLayerNormPluginDynamic(const std::string name, const void* data, size_t length) : mLayerName(name) { gLogVerbose << "Skip LN Deser start\n"; // Deserialize in the same order as serialization const char* d = static_cast<const char*>(data); const char* a = d; DESER(d, mType); DESER(d, mLd); mBetaDev = deserToDev<float>(d, mLd); mGammaDev = deserToDev<float>(d, mLd); assert(d == (a + length)); // this signals init not to allocate/copy mGamma.count = mLd; mGamma.values = nullptr; mBeta.count = mLd; mBeta.values = nullptr; gLogVerbose << "Skip LN Deser done\n"; } // IPluginV2DynamicExt Methods IPluginV2DynamicExt* SkipLayerNormPluginDynamic::clone() const { return new SkipLayerNormPluginDynamic(mLayerName, mLd, mBeta, mGamma); } DimsExprs SkipLayerNormPluginDynamic::getOutputDimensions(int outputIndex, const DimsExprs* inputs, int nbInputs, IExprBuilder& exprBuilder) { assert(nbInputs == 2); assert(outputIndex == 0); assert(inputs[0].nbDims == inputs[1].nbDims); return inputs[0]; } bool SkipLayerNormPluginDynamic::supportsFormatCombination(int pos, const PluginTensorDesc* inOut, int nbInputs, int nbOutputs) { assert(nbInputs == 2); assert(nbOutputs == 1); const PluginTensorDesc& in = inOut[pos]; if (pos == 0) { return (in.type == DataType::kFLOAT || in.type == DataType::kHALF) && (in.format == TensorFormat::kLINEAR); } const PluginTensorDesc& prev = inOut[pos - 1]; if (pos == 1) { return in.type == prev.type && in.format == prev.format; } // output return in.type == prev.type && in.format == prev.format; } void SkipLayerNormPluginDynamic::configurePlugin(const DynamicPluginTensorDesc* inputs, int nbInputs, const DynamicPluginTensorDesc* outputs, int nbOutputs) { // Validate input arguments assert(nbOutputs == 1); assert(nbInputs == 2); mType = inputs[0].desc.type; assert(mType == inputs[1].desc.type); const auto& inDims0 = inputs[0].desc.dims; const auto& inDims1 = inputs[1].desc.dims; assert(inDims0.nbDims == inDims1.nbDims); assert(std::equal(inDims0.d, inDims0.d + inDims0.nbDims, inDims1.d)); assert(inDims0.nbDims== 5); mLd = inDims0.d[2]; // hiddensize assert(inDims0.d[3] == 1); assert(inDims0.d[4] == 1); } size_t SkipLayerNormPluginDynamic::getWorkspaceSize(const PluginTensorDesc* inputs, int nbInputs, const PluginTensorDesc* outputs, int nbOutputs) const { return 0; } int SkipLayerNormPluginDynamic::enqueue(const PluginTensorDesc* inputDesc, const PluginTensorDesc* outputDesc, const void* const* inputs, void* const* outputs, void* workspace, cudaStream_t stream) { const int inputVolume = samplesCommon::volume(inputDesc[0].dims); int status = -1; // Our plugin outputs only one tensor // Launch CUDA kernel wrapper and save its return value if (mType == DataType::kFLOAT) { const float* input = static_cast<const float*>(inputs[0]); const float* skip = static_cast<const float*>(inputs[1]); float* output = static_cast<float*>(outputs[0]); status = computeSkipLayerNorm<float>(stream, mLd, inputVolume, input, skip, mBetaDev, mGammaDev, output); } else if (mType == DataType::kHALF) { const half* input = static_cast<const half*>(inputs[0]); const half* skip = static_cast<const half*>(inputs[1]); half* output = static_cast<half*>(outputs[0]); status = computeSkipLayerNorm<half>(stream, mLd, inputVolume, input, skip, mBetaDev, mGammaDev, output); } else { gLogError << "Unsupported Type\n"; assert(false); } return status; } // 
IPluginV2Ext Methods DataType SkipLayerNormPluginDynamic::getOutputDataType(int index, const DataType* inputTypes, int nbInputs) const { assert(index == 0); assert(nbInputs == 2); assert(inputTypes[0] == DataType::kFLOAT || inputTypes[0] == DataType::kHALF); assert(inputTypes[0] == inputTypes[1]); return inputTypes[0]; } // IPluginV2 Methods const char* SkipLayerNormPluginDynamic::getPluginType() const { return SKIP_LAYER_NORM_NAME; } const char* SkipLayerNormPluginDynamic::getPluginVersion() const { return SKIP_LAYER_NORM_VERSION; } int SkipLayerNormPluginDynamic::getNbOutputs() const { return 1; } int SkipLayerNormPluginDynamic::initialize() { if (mGamma.values) { CHECK(cudaMalloc(&mGammaDev, sizeof(float) * mGamma.count)); CHECK(cudaMemcpy(mGammaDev, mGamma.values, sizeof(float) * mGamma.count, cudaMemcpyHostToDevice)); } if (mBeta.values) { CHECK(cudaMalloc(&mBetaDev, sizeof(float) * mBeta.count)); CHECK(cudaMemcpy(mBetaDev, mBeta.values, sizeof(float) * mGamma.count, cudaMemcpyHostToDevice)); } return 0; } void SkipLayerNormPluginDynamic::terminate() { gLogVerbose << "SKIPLN terminate start" << std::endl; cudaFree(mGammaDev); cudaFree(mBetaDev); gLogVerbose << "SKIPLN terminate done" << std::endl; } size_t SkipLayerNormPluginDynamic::getSerializationSize() const { return 2 * sizeof(float) * mLd + sizeof(DataType) + sizeof(mLd) ; } void SkipLayerNormPluginDynamic::serialize(void* buffer) const { char* d = static_cast<char*>(buffer); const char* a = d; writeToBuffer(d, mType); writeToBuffer(d, mLd); serFromDev(d, mBetaDev, mLd); serFromDev(d, mGammaDev, mLd); assert(d == a + getSerializationSize()); } void SkipLayerNormPluginDynamic::destroy() { // This gets called when the network containing plugin is destroyed delete this; } void SkipLayerNormPluginDynamic::setPluginNamespace(const char* libNamespace) { mNamespace = libNamespace; } const char* SkipLayerNormPluginDynamic::getPluginNamespace() const { return mNamespace.c_str(); } ///////////////////////////////////////////////////////// SkipLayerNormPluginDynamicCreator::SkipLayerNormPluginDynamicCreator() { mFC.nbFields = mPluginAttributes.size(); mFC.fields = mPluginAttributes.data(); } const char* SkipLayerNormPluginDynamicCreator::getPluginName() const { return SKIP_LAYER_NORM_NAME; } const char* SkipLayerNormPluginDynamicCreator::getPluginVersion() const { return SKIP_LAYER_NORM_VERSION; } const PluginFieldCollection* SkipLayerNormPluginDynamicCreator::getFieldNames() { return &mFC; } IPluginV2* SkipLayerNormPluginDynamicCreator::createPlugin(const char* name, const PluginFieldCollection* fc) { gLogVerbose << "Creating SkipLayerNormPluginDynamicCreator...\n"; int ld; Weights beta; Weights gamma; for (int i = 0; i < fc->nbFields; i++) { std::string field_name(fc->fields[i].name); if (field_name.compare("ld") == 0) { ld = *static_cast<const int*>(fc->fields[i].data); gLogVerbose << "Building ld: " << ld << std::endl; } if (field_name.compare("beta") == 0) { gLogVerbose << "Building beta...\n"; beta.values = fc->fields[i].data; beta.count = fc->fields[i].length; beta.type = static_cast<DataType>(fc->fields[i].type); } if (field_name.compare("gamma") == 0) { gLogVerbose << "Building gamma...\n"; gamma.values = fc->fields[i].data; gamma.count = fc->fields[i].length; gamma.type = static_cast<DataType>(fc->fields[i].type); } } SkipLayerNormPluginDynamic* p = new SkipLayerNormPluginDynamic(name, ld, beta, gamma); return p; } IPluginV2* SkipLayerNormPluginDynamicCreator::deserializePlugin(const char* name, const void* serialData, size_t 
serialLength) { // This object will be deleted when the network is destroyed, which will // call SkipLayerNormPluginDynamic::destroy() return new SkipLayerNormPluginDynamic(name, serialData, serialLength); } void SkipLayerNormPluginDynamicCreator::setPluginNamespace(const char* libNamespace) { mNamespace = libNamespace; } const char* SkipLayerNormPluginDynamicCreator::getPluginNamespace() const { return mNamespace.c_str(); } } }
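The pair above is representative of how hipify rewrites kernel launches: skipLayerNormKernelSmall<T, blockSize><<<gridSize, blockSize, 0, stream>>>(...) in the .cu file becomes hipLaunchKernelGGL with explicit dim3, shared-memory and stream arguments, while cub::Sum, cudaStream_t and cudaPeekAtLastError map to hipcub::Sum, hipStream_t and hipPeekAtLastError. The following is a minimal, self-contained sketch of that launch mapping only; the scaleKernel name, sizes and data are illustrative and not taken from the plugin sources.

// Minimal launch-syntax sketch (HIP side); kernel and sizes are illustrative.
#include <hip/hip_runtime.h>

__global__ void scaleKernel(float* data, float factor, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        data[i] *= factor;
}

int main()
{
    const int n = 1024;
    float* d_data;
    hipMalloc(&d_data, n * sizeof(float));

    hipStream_t stream;            // cudaStream_t in the CUDA original
    hipStreamCreate(&stream);

    // CUDA: scaleKernel<<<4, 256, 0, stream>>>(d_data, 2.0f, n);
    // HIP : grid/block become dim3 arguments; shared memory and stream stay explicit.
    hipLaunchKernelGGL(scaleKernel, dim3(4), dim3(256), 0, stream, d_data, 2.0f, n);

    hipStreamSynchronize(stream);
    hipStreamDestroy(stream);
    hipFree(d_data);
    return 0;
}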
9fc946fe1ba7125d2200506c02d282853f23ff31.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #ifdef WITH_CUDA #include "hnms.h" #include <ATen/ATen.h> #include <ATen/hip/HIPContext.h> #include <THH/THH.h> #include <THH/THHDeviceUtils.cuh> #include <vector> #include <iostream> #include <ctime> #include <chrono> using namespace std::chrono; #define CONF_TO_INT_MULT 1000000 #define CONF_TO_INT_ADD 100000 #define CONF_TO_INT(x) (long long)((x) * CONF_TO_INT_MULT) + CONF_TO_INT_ADD #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 512; int const threadsPerBlock = sizeof(unsigned long long) * 8; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename T> __global__ void hnms_max_conf_kernel(long long nthreads, T* box_confs, int64_t* cell_indices, int64_t* cell_max_confs) { CUDA_1D_KERNEL_LOOP(i, nthreads) { unsigned long long conf = CONF_TO_INT(box_confs[i]); unsigned long long cell = cell_indices[i]; unsigned long long * cell_max = (unsigned long long*)(cell_max_confs + cell); // long long type is not supported for atomiMax atomicMax(cell_max, conf); } } template <typename T> __global__ void hnms_max_idx_kernel(long long nthreads, T* box_confs, int64_t* cell_indices, int64_t* cell_max_confs) { CUDA_1D_KERNEL_LOOP(i, nthreads) { unsigned long long conf = CONF_TO_INT(box_confs[i]); auto cell = cell_indices[i]; unsigned long long* cell_max = (unsigned long long*)(cell_max_confs + cell); // no implementation to take long long, but unsigned long long atomicCAS(cell_max, conf, (unsigned long long)i); } } template <typename T> __global__ void hash_rects_kernel(int64_t nthreads, T* dets, T w0, T h0, T alpha, T bx, T by, T alpha_ratio, int64_t* out) { CUDA_1D_KERNEL_LOOP(idx_box, nthreads) { auto log_w0 = log(w0); auto log_h0 = log(h0); auto log_alpha = log(alpha); auto curr_det = dets + idx_box * 4; auto x = curr_det[0]; auto y = curr_det[1]; auto w = curr_det[2]; auto h = curr_det[3]; auto w0_alpha = w0 * alpha_ratio; auto h0_alpha = h0 * alpha_ratio; auto i = round((log_w0 - log(w)) / log_alpha); auto j = round((log_h0 - log(h)) / log_alpha); auto di = w0_alpha / pow(alpha, i); auto dj = h0_alpha / pow(alpha, j); int64_t qx, qy; qx = round(x / di - bx); qy = round(y / dj - by); auto curr_out = out + 4 * idx_box; curr_out[0] = qx; curr_out[1] = qy; curr_out[2] = i; curr_out[3] = j; } } at::Tensor hash_rects_cuda(const at::Tensor& dets, float w0, float h0, float alpha, float bx, float by) { auto num_box = dets.size(0); auto alpha_ratio = (1. - alpha) / (1. 
+ alpha); auto result = at::zeros({long(num_box), 4}, dets.options().dtype(at::kLong)); AT_DISPATCH_FLOATING_TYPES(dets.type(), "HASH_RECTS", [&] { hipLaunchKernelGGL(( hash_rects_kernel<scalar_t>), dim3(GET_BLOCKS(num_box)), dim3(CUDA_NUM_THREADS), 0, 0, num_box, dets.data<scalar_t>(), (scalar_t)w0, (scalar_t)h0, (scalar_t)alpha, (scalar_t)bx, (scalar_t)by, alpha_ratio, result.data<int64_t>()); }); return result; } __global__ void map_code(int num_box, int64_t* codes, int64_t* codes_as_one) { CUDA_1D_KERNEL_LOOP(idx_box, num_box) { auto curr_code = codes + 4 * idx_box; auto curr_mapped = codes_as_one + idx_box; *curr_mapped = curr_code[0] + curr_code[1] * 10000 + curr_code[2] * 100000000 + curr_code[3] * 1000000000000; } } at::Tensor get_best_idx_each_code( at::Tensor codes, const at::Tensor& scores) { auto num_box = codes.size(0); auto codes_as_one = at::zeros({long(num_box)}, codes.options().dtype(at::kLong)); hipLaunchKernelGGL(( map_code), dim3(GET_BLOCKS(num_box)), dim3(CUDA_NUM_THREADS), 0, 0, num_box, codes.data<int64_t>(), codes_as_one.data<int64_t>()); THCudaCheck(hipGetLastError()); auto unique_result = at::unique_dim(codes_as_one, 0, // dim false, true); at::Tensor reverse_index = std::get<1>(unique_result); auto count = std::get<0>(unique_result).size(0); auto result = at::zeros({long(count)}, codes.options().dtype(at::kLong)); // get the maximum confidence score for each code with the atomic operation // of atomicMax. AT_DISPATCH_FLOATING_TYPES(scores.type(), "HNMS_MAX_IDX_KERNEL", [&] { hipLaunchKernelGGL(( hnms_max_conf_kernel<scalar_t>), dim3(GET_BLOCKS(num_box)), dim3(CUDA_NUM_THREADS), 0, 0, num_box, scores.data<scalar_t>(), reverse_index.data<int64_t>(), result.data<int64_t>()); }); THCudaCheck(hipGetLastError()); AT_DISPATCH_FLOATING_TYPES(scores.type(), "HNMS_MAX_IDX_KERNEL", [&] { hipLaunchKernelGGL(( hnms_max_idx_kernel<scalar_t>), dim3(GET_BLOCKS(num_box)), dim3(CUDA_NUM_THREADS), 0, 0, num_box, scores.data<scalar_t>(), reverse_index.data_ptr<int64_t>(), result.data<int64_t>()); // NULL, }); return result; } at::Tensor hnms_cuda(const at::Tensor& dets, const at::Tensor& scores, float w0, float h0, float alpha, float bx, float by ) { AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor"); AT_ASSERTM(scores.type().is_cuda(), "scores must be a CUDA tensor"); AT_ASSERTM(dets.type() == scores.type(), "dets should have the same type as scores"); if (dets.numel() == 0) { return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU)); } auto codes = hash_rects_cuda(dets, w0, h0, alpha, bx, by); auto result = get_best_idx_each_code(codes, scores); return result; } #endif
9fc946fe1ba7125d2200506c02d282853f23ff31.cu
#ifdef WITH_CUDA #include "hnms.h" #include <ATen/ATen.h> #include <ATen/cuda/CUDAContext.h> #include <THC/THC.h> #include <THC/THCDeviceUtils.cuh> #include <vector> #include <iostream> #include <ctime> #include <chrono> using namespace std::chrono; #define CONF_TO_INT_MULT 1000000 #define CONF_TO_INT_ADD 100000 #define CONF_TO_INT(x) (long long)((x) * CONF_TO_INT_MULT) + CONF_TO_INT_ADD #define CUDA_1D_KERNEL_LOOP(i, n) \ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \ i += blockDim.x * gridDim.x) const int CUDA_NUM_THREADS = 512; int const threadsPerBlock = sizeof(unsigned long long) * 8; inline int GET_BLOCKS(const int N) { return (N + CUDA_NUM_THREADS - 1) / CUDA_NUM_THREADS; } template <typename T> __global__ void hnms_max_conf_kernel(long long nthreads, T* box_confs, int64_t* cell_indices, int64_t* cell_max_confs) { CUDA_1D_KERNEL_LOOP(i, nthreads) { unsigned long long conf = CONF_TO_INT(box_confs[i]); unsigned long long cell = cell_indices[i]; unsigned long long * cell_max = (unsigned long long*)(cell_max_confs + cell); // long long type is not supported for atomiMax atomicMax(cell_max, conf); } } template <typename T> __global__ void hnms_max_idx_kernel(long long nthreads, T* box_confs, int64_t* cell_indices, int64_t* cell_max_confs) { CUDA_1D_KERNEL_LOOP(i, nthreads) { unsigned long long conf = CONF_TO_INT(box_confs[i]); auto cell = cell_indices[i]; unsigned long long* cell_max = (unsigned long long*)(cell_max_confs + cell); // no implementation to take long long, but unsigned long long atomicCAS(cell_max, conf, (unsigned long long)i); } } template <typename T> __global__ void hash_rects_kernel(int64_t nthreads, T* dets, T w0, T h0, T alpha, T bx, T by, T alpha_ratio, int64_t* out) { CUDA_1D_KERNEL_LOOP(idx_box, nthreads) { auto log_w0 = log(w0); auto log_h0 = log(h0); auto log_alpha = log(alpha); auto curr_det = dets + idx_box * 4; auto x = curr_det[0]; auto y = curr_det[1]; auto w = curr_det[2]; auto h = curr_det[3]; auto w0_alpha = w0 * alpha_ratio; auto h0_alpha = h0 * alpha_ratio; auto i = round((log_w0 - log(w)) / log_alpha); auto j = round((log_h0 - log(h)) / log_alpha); auto di = w0_alpha / pow(alpha, i); auto dj = h0_alpha / pow(alpha, j); int64_t qx, qy; qx = round(x / di - bx); qy = round(y / dj - by); auto curr_out = out + 4 * idx_box; curr_out[0] = qx; curr_out[1] = qy; curr_out[2] = i; curr_out[3] = j; } } at::Tensor hash_rects_cuda(const at::Tensor& dets, float w0, float h0, float alpha, float bx, float by) { auto num_box = dets.size(0); auto alpha_ratio = (1. - alpha) / (1. 
+ alpha); auto result = at::zeros({long(num_box), 4}, dets.options().dtype(at::kLong)); AT_DISPATCH_FLOATING_TYPES(dets.type(), "HASH_RECTS", [&] { hash_rects_kernel<scalar_t><<<GET_BLOCKS(num_box), CUDA_NUM_THREADS>>>(num_box, dets.data<scalar_t>(), (scalar_t)w0, (scalar_t)h0, (scalar_t)alpha, (scalar_t)bx, (scalar_t)by, alpha_ratio, result.data<int64_t>()); }); return result; } __global__ void map_code(int num_box, int64_t* codes, int64_t* codes_as_one) { CUDA_1D_KERNEL_LOOP(idx_box, num_box) { auto curr_code = codes + 4 * idx_box; auto curr_mapped = codes_as_one + idx_box; *curr_mapped = curr_code[0] + curr_code[1] * 10000 + curr_code[2] * 100000000 + curr_code[3] * 1000000000000; } } at::Tensor get_best_idx_each_code( at::Tensor codes, const at::Tensor& scores) { auto num_box = codes.size(0); auto codes_as_one = at::zeros({long(num_box)}, codes.options().dtype(at::kLong)); map_code<<<GET_BLOCKS(num_box), CUDA_NUM_THREADS>>>(num_box, codes.data<int64_t>(), codes_as_one.data<int64_t>()); THCudaCheck(cudaGetLastError()); auto unique_result = at::unique_dim(codes_as_one, 0, // dim false, true); at::Tensor reverse_index = std::get<1>(unique_result); auto count = std::get<0>(unique_result).size(0); auto result = at::zeros({long(count)}, codes.options().dtype(at::kLong)); // get the maximum confidence score for each code with the atomic operation // of atomicMax. AT_DISPATCH_FLOATING_TYPES(scores.type(), "HNMS_MAX_IDX_KERNEL", [&] { hnms_max_conf_kernel<scalar_t><<<GET_BLOCKS(num_box), CUDA_NUM_THREADS>>>( num_box, scores.data<scalar_t>(), reverse_index.data<int64_t>(), result.data<int64_t>()); }); THCudaCheck(cudaGetLastError()); AT_DISPATCH_FLOATING_TYPES(scores.type(), "HNMS_MAX_IDX_KERNEL", [&] { hnms_max_idx_kernel<scalar_t><<<GET_BLOCKS(num_box), CUDA_NUM_THREADS>>>( num_box, scores.data<scalar_t>(), reverse_index.data_ptr<int64_t>(), result.data<int64_t>()); // NULL, }); return result; } at::Tensor hnms_cuda(const at::Tensor& dets, const at::Tensor& scores, float w0, float h0, float alpha, float bx, float by ) { AT_ASSERTM(dets.type().is_cuda(), "dets must be a CUDA tensor"); AT_ASSERTM(scores.type().is_cuda(), "scores must be a CUDA tensor"); AT_ASSERTM(dets.type() == scores.type(), "dets should have the same type as scores"); if (dets.numel() == 0) { return at::empty({0}, dets.options().dtype(at::kLong).device(at::kCPU)); } auto codes = hash_rects_cuda(dets, w0, h0, alpha, bx, by); auto result = get_best_idx_each_code(codes, scores); return result; } #endif
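In this pair the device kernels are untouched; the conversion is confined to runtime calls: cudaGetLastError() becomes hipGetLastError() inside the unchanged THCudaCheck(...) wrapper, and launches on the default stream pick up explicit trailing 0, 0 arguments (shared-memory size and stream) in hipLaunchKernelGGL. A hedged sketch of the same error-checking pattern outside the ATen/THC environment follows; the HIP_CHECK macro and the fill kernel are illustrative, not part of the files above.

#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

// Illustrative error-check macro; the sources above rely on THCudaCheck instead.
#define HIP_CHECK(expr)                                                    \
    do {                                                                   \
        hipError_t err_ = (expr);                                          \
        if (err_ != hipSuccess) {                                          \
            fprintf(stderr, "HIP error %s at %s:%d\n",                     \
                    hipGetErrorString(err_), __FILE__, __LINE__);          \
            exit(EXIT_FAILURE);                                            \
        }                                                                  \
    } while (0)

__global__ void fill(int* out, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n)
        out[i] = i;
}

int main()
{
    int* d_out;
    HIP_CHECK(hipMalloc(&d_out, 256 * sizeof(int)));

    // Default stream: the trailing 0, 0 are the shared-memory size and the stream,
    // which the <<<...>>> form of the CUDA file leaves implicit.
    hipLaunchKernelGGL(fill, dim3(1), dim3(256), 0, 0, d_out, 256);
    HIP_CHECK(hipGetLastError());   // cudaGetLastError() in the CUDA original

    HIP_CHECK(hipFree(d_out));
    return 0;
}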
1687d9c719a7b57a689af819bde0ad945bb3ca68.hip
// !!! This is a file automatically generated by hipify!!! /* * EDDL Library - European Distributed Deep Learning Library. * Version: 0.3 * copyright (c) 2019, Universidad Politcnica de Valencia (UPV), PRHLT Research Centre * Date: October 2019 * Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected]) * All rights reserved */ #include <string.h> #include <stdio.h> #include <stdlib.h> #include <iostream> #include <hip/hip_runtime.h> // GPU: Truth value testing __global__ void glogical_all(float *A, int size, bool &result){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; // if(!result) return; // Abort if there is a result if (thread_id_x < size && result){ if (A[thread_id_x] != 1.0f){ result = false; // return; } } } __global__ void glogical_any(float *A, int size, bool &result){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; // if(result) return; // Abort if there is a result if (thread_id_x < size && !result){ if (A[thread_id_x] == 1.0f){ result = true; // return; } } } __global__ void gpu_isfinite(float *A, float *B, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isfinite(A[thread_id_x]); } } __global__ void gpu_isinf(float *A, float *B, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isinf(A[thread_id_x]); } } __global__ void gpu_isnan(float *A, float *B, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isnan(A[thread_id_x]); } } __global__ void gpu_isneginf(float *A, float *B, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isinf(A[thread_id_x]) && A[thread_id_x] < 0.0f; } } __global__ void gpu_isposinf(float *A, float *B, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isinf(A[thread_id_x]) && A[thread_id_x] > 0.0f; } } __global__ void glogical_and(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = (bool)A[thread_id_x] & (bool)B[thread_id_x]; } } __global__ void glogical_or(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = (bool)A[thread_id_x] | (bool)B[thread_id_x]; } } __global__ void glogical_not(float *A, float *B, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = !((bool)A[thread_id_x]); } } __global__ void glogical_xor(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = (bool)A[thread_id_x] ^ (bool)B[thread_id_x]; } } __global__ void glogical_allclose(float *A, float *B, float rtol, float atol, bool equal_nan, int size, bool &allclose){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; // if(!allclose) return; // Abort if there is a result if (thread_id_x < size && allclose){ bool close = fabsf(A[thread_id_x] - B[thread_id_x]) <= (atol + rtol * fabsf(B[thread_id_x])); if (!close){ allclose = false; // return; } } } __global__ void glogical_isclose(float *A, float *B, float *C, float rtol, float atol, bool equal_nan, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = fabsf(A[thread_id_x] 
- B[thread_id_x]) <= (atol + rtol * fabsf(B[thread_id_x])); } } __global__ void glogical_greater(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] > B[thread_id_x]; } } __global__ void glogical_greater_equal(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] >= B[thread_id_x]; } } __global__ void glogical_less(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] < B[thread_id_x]; } } __global__ void glogical_less_equal(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] <= B[thread_id_x]; } } __global__ void glogical_equal(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] == B[thread_id_x]; } } __global__ void glogical_not_equal(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] != B[thread_id_x]; } }
1687d9c719a7b57a689af819bde0ad945bb3ca68.cu
/* * EDDL Library - European Distributed Deep Learning Library. * Version: 0.3 * copyright (c) 2019, Universidad Politécnica de Valencia (UPV), PRHLT Research Centre * Date: October 2019 * Author: PRHLT Research Centre, UPV, ([email protected]), ([email protected]) * All rights reserved */ #include <string.h> #include <stdio.h> #include <stdlib.h> #include <iostream> #include <cuda.h> // GPU: Truth value testing __global__ void glogical_all(float *A, int size, bool &result){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; // if(!result) return; // Abort if there is a result if (thread_id_x < size && result){ if (A[thread_id_x] != 1.0f){ result = false; // return; } } } __global__ void glogical_any(float *A, int size, bool &result){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; // if(result) return; // Abort if there is a result if (thread_id_x < size && !result){ if (A[thread_id_x] == 1.0f){ result = true; // return; } } } __global__ void gpu_isfinite(float *A, float *B, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isfinite(A[thread_id_x]); } } __global__ void gpu_isinf(float *A, float *B, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isinf(A[thread_id_x]); } } __global__ void gpu_isnan(float *A, float *B, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isnan(A[thread_id_x]); } } __global__ void gpu_isneginf(float *A, float *B, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isinf(A[thread_id_x]) && A[thread_id_x] < 0.0f; } } __global__ void gpu_isposinf(float *A, float *B, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = isinf(A[thread_id_x]) && A[thread_id_x] > 0.0f; } } __global__ void glogical_and(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = (bool)A[thread_id_x] & (bool)B[thread_id_x]; } } __global__ void glogical_or(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = (bool)A[thread_id_x] | (bool)B[thread_id_x]; } } __global__ void glogical_not(float *A, float *B, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ B[thread_id_x] = !((bool)A[thread_id_x]); } } __global__ void glogical_xor(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = (bool)A[thread_id_x] ^ (bool)B[thread_id_x]; } } __global__ void glogical_allclose(float *A, float *B, float rtol, float atol, bool equal_nan, int size, bool &allclose){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; // if(!allclose) return; // Abort if there is a result if (thread_id_x < size && allclose){ bool close = fabsf(A[thread_id_x] - B[thread_id_x]) <= (atol + rtol * fabsf(B[thread_id_x])); if (!close){ allclose = false; // return; } } } __global__ void glogical_isclose(float *A, float *B, float *C, float rtol, float atol, bool equal_nan, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = fabsf(A[thread_id_x] - B[thread_id_x]) <= (atol + rtol * fabsf(B[thread_id_x])); } } 
__global__ void glogical_greater(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] > B[thread_id_x]; } } __global__ void glogical_greater_equal(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] >= B[thread_id_x]; } } __global__ void glogical_less(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] < B[thread_id_x]; } } __global__ void glogical_less_equal(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] <= B[thread_id_x]; } } __global__ void glogical_equal(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] == B[thread_id_x]; } } __global__ void glogical_not_equal(float *A, float *B, float *C, int size){ long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x; if (thread_id_x < size){ C[thread_id_x] = A[thread_id_x] != B[thread_id_x]; } }
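This pair is close to the minimal case: the kernels use only built-ins (blockIdx, isinf, fabsf, expf and friends) that exist under both toolchains, so the device code survives the conversion unchanged and only the host-side include moves from <cuda.h> to <hip/hip_runtime.h>. A small device-only sketch of such a header-switch port is shown below; the USE_HIP flag and the gpu_is_negative kernel are illustrative, not taken from the library.

// Illustrative translation unit; only the include differs between the two builds.
#ifdef USE_HIP
#include <hip/hip_runtime.h>
#else
#include <cuda.h>
#endif

__global__ void gpu_is_negative(float *A, float *B, int size)
{
    long int thread_id_x = blockIdx.x * blockDim.x + threadIdx.x;
    if (thread_id_x < size) {
        // Math predicates and index built-ins are spelled identically in CUDA and HIP.
        B[thread_id_x] = isfinite(A[thread_id_x]) && A[thread_id_x] < 0.0f;
    }
}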
a0dac4ce3a7ddcf5435354c57ead3a601a5034a4.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Element-wise vector addition: c[i] = a[i] + b[i]
__global__ void vector_add(float *a, float *b, float *c, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n)
        c[tid] = a[tid] + b[tid];
}

int main(int argc, char* argv[])
{
    hipEvent_t start, stop;
    float elapsedTime;
    hipEventCreate(&start);
    hipEventCreate(&stop);

    if (argc < 2) {
        printf("need exactly 1 argument\n");
        return 0;
    }
    int vector_size = atoi(argv[1]);

    // Host and device buffers
    float* host_a = (float*)malloc(sizeof(float) * vector_size);
    float* host_b = (float*)malloc(sizeof(float) * vector_size);
    float* host_c = (float*)malloc(sizeof(float) * vector_size);
    float* device_a;
    float* device_b;
    float* device_c;
    hipMalloc(&device_a, sizeof(float) * vector_size);
    hipMalloc(&device_b, sizeof(float) * vector_size);
    hipMalloc(&device_c, sizeof(float) * vector_size);

    int i;
    for (i = 0; i < vector_size; i++) {
        host_a[i] = 1;
        host_b[i] = 1;
        //host_a[i] = rand() % vector_size;
        //host_b[i] = rand() % vector_size;
    }

    hipEventRecord(start, 0);

    // Copy both input vectors to the device
    hipMemcpy(device_a, host_a, sizeof(float) * vector_size, hipMemcpyHostToDevice);
    hipMemcpy(device_b, host_b, sizeof(float) * vector_size, hipMemcpyHostToDevice);

    // Round the grid size up so every element is covered
    int block_size = 1024;
    int grid_size = vector_size / block_size;
    if (vector_size % block_size) {
        grid_size = grid_size + 1;
    }

    hipLaunchKernelGGL((vector_add), dim3(grid_size), dim3(block_size), 0, 0,
                       device_a, device_b, device_c, vector_size);

    hipMemcpy(host_c, device_c, sizeof(float) * vector_size, hipMemcpyDeviceToHost);

    float sum = 0;
    for (i = 0; i < vector_size; i++) {
        sum += host_c[i];
    }

    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);
    hipEventElapsedTime(&elapsedTime, start, stop);
    printf("%f\n", elapsedTime);

    hipFree(device_a);
    hipFree(device_b);
    hipFree(device_c);
    free(host_a);
    free(host_b);
    free(host_c);
    return 0;
}
a0dac4ce3a7ddcf5435354c57ead3a601a5034a4.cu
#include <stdio.h>
#include <stdlib.h>
#include <math.h>

// Element-wise vector addition: c[i] = a[i] + b[i]
__global__ void vector_add(float *a, float *b, float *c, int n)
{
    int tid = blockIdx.x * blockDim.x + threadIdx.x;
    if (tid < n)
        c[tid] = a[tid] + b[tid];
}

int main(int argc, char* argv[])
{
    cudaEvent_t start, stop;
    float elapsedTime;
    cudaEventCreate(&start);
    cudaEventCreate(&stop);

    if (argc < 2) {
        printf("need exactly 1 argument\n");
        return 0;
    }
    int vector_size = atoi(argv[1]);

    // Host and device buffers
    float* host_a = (float*)malloc(sizeof(float) * vector_size);
    float* host_b = (float*)malloc(sizeof(float) * vector_size);
    float* host_c = (float*)malloc(sizeof(float) * vector_size);
    float* device_a;
    float* device_b;
    float* device_c;
    cudaMalloc(&device_a, sizeof(float) * vector_size);
    cudaMalloc(&device_b, sizeof(float) * vector_size);
    cudaMalloc(&device_c, sizeof(float) * vector_size);

    int i;
    for (i = 0; i < vector_size; i++) {
        host_a[i] = 1;
        host_b[i] = 1;
        //host_a[i] = rand() % vector_size;
        //host_b[i] = rand() % vector_size;
    }

    cudaEventRecord(start, 0);

    // Copy both input vectors to the device
    cudaMemcpy(device_a, host_a, sizeof(float) * vector_size, cudaMemcpyHostToDevice);
    cudaMemcpy(device_b, host_b, sizeof(float) * vector_size, cudaMemcpyHostToDevice);

    // Round the grid size up so every element is covered
    int block_size = 1024;
    int grid_size = vector_size / block_size;
    if (vector_size % block_size) {
        grid_size = grid_size + 1;
    }

    vector_add<<<grid_size, block_size>>>(device_a, device_b, device_c, vector_size);

    cudaMemcpy(host_c, device_c, sizeof(float) * vector_size, cudaMemcpyDeviceToHost);

    float sum = 0;
    for (i = 0; i < vector_size; i++) {
        sum += host_c[i];
    }

    cudaEventRecord(stop, 0);
    cudaEventSynchronize(stop);
    cudaEventElapsedTime(&elapsedTime, start, stop);
    printf("%f\n", elapsedTime);

    cudaFree(device_a);
    cudaFree(device_b);
    cudaFree(device_c);
    free(host_a);
    free(host_b);
    free(host_c);
    return 0;
}
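The timing code in this small benchmark maps one to one: cudaEvent_t, cudaEventRecord, cudaEventSynchronize and cudaEventElapsedTime become hipEvent_t and the matching hip* calls with identical signatures. A stripped-down sketch of the same timing idiom follows; the buffer size and the hipMemset stand-in for the timed work are illustrative.

#include <hip/hip_runtime.h>
#include <cstdio>

int main()
{
    const size_t bytes = 1 << 20;
    float* d_buf;
    hipMalloc(&d_buf, bytes);

    hipEvent_t start, stop;          // cudaEvent_t in the CUDA original
    hipEventCreate(&start);
    hipEventCreate(&stop);

    hipEventRecord(start, 0);
    hipMemset(d_buf, 0, bytes);      // stand-in for the work being timed
    hipEventRecord(stop, 0);
    hipEventSynchronize(stop);

    float ms = 0.0f;
    hipEventElapsedTime(&ms, start, stop);
    printf("%f\n", ms);

    hipEventDestroy(start);
    hipEventDestroy(stop);
    hipFree(d_buf);
    return 0;
}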
ed860ceb14a95405fb5de1566593f9e66365ddd9.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Simple 3D volume renderer #ifndef _VOLUMERENDER_KERNEL_CU_ #define _VOLUMERENDER_KERNEL_CU_ #include <helper_cuda.h> #include <helper_math.h> typedef unsigned int uint; typedef unsigned char uchar; hipArray *d_volumeArray = 0; hipArray *d_transferFuncArray; typedef unsigned char VolumeType; //typedef unsigned short VolumeType; hipTextureObject_t texObject; // For 3D texture hipTextureObject_t transferTex; // For 1D transfer function texture typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; // intersect ray with a box // http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0] rgba.y = __saturatef(rgba.y); rgba.z = __saturatef(rgba.z); rgba.w = __saturatef(rgba.w); return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255); } __global__ void d_render(uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, hipTextureObject_t tex, hipTextureObject_t transferTex) { const int maxSteps = 500; const float tstep = 0.01f; const float opacityThreshold = 0.95f; const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f); const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; float u = (x / (float) imageW)*2.0f-1.0f; float v = (y / (float) imageH)*2.0f-1.0f; // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, 
&tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // march along ray from front to back, accumulating color float4 sum = make_float4(0.0f); float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for (int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] coordinates float sample = tex3D<float>(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f); //sample *= 64.0f; // scale for 10-bit data // lookup in transfer function texture float4 col = tex1D<float4>(transferTex, (sample-transferOffset)*transferScale); col.w *= density; // "under" operator for back-to-front blending //sum = lerp(sum, col, col.w); // pre-multiply alpha col.x *= col.w; col.y *= col.w; col.z *= col.w; // "over" operator for front-to-back blending sum = sum + col*(1.0f - sum.w); // exit early if opaque if (sum.w > opacityThreshold) break; t += tstep; if (t > tfar) break; pos += step; } sum *= brightness; // write output color d_output[y*imageW + x] = rgbaFloatToInt(sum); } extern "C" void setTextureFilterMode(bool bLinearFilter) { if (texObject) { checkCudaErrors(hipDestroyTextureObject(texObject)); } hipResourceDesc texRes; memset(&texRes,0,sizeof(hipResourceDesc)); texRes.resType = hipResourceTypeArray; texRes.res.array.array = d_volumeArray; hipTextureDesc texDescr; memset(&texDescr,0,sizeof(hipTextureDesc)); texDescr.normalizedCoords = true; texDescr.filterMode = bLinearFilter ? hipFilterModeLinear : hipFilterModePoint; texDescr.addressMode[0] = hipAddressModeWrap; texDescr.addressMode[1] = hipAddressModeWrap; texDescr.addressMode[2] = hipAddressModeWrap; texDescr.readMode = hipReadModeNormalizedFloat; checkCudaErrors(hipCreateTextureObject(&texObject, &texRes, &texDescr, NULL)); } extern "C" void initCuda(void *h_volume, hipExtent volumeSize) { // create 3D array hipChannelFormatDesc channelDesc = hipCreateChannelDesc<VolumeType>(); checkCudaErrors(hipMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize)); // copy data to 3D array hipMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_hipPitchedPtr(h_volume, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = hipMemcpyHostToDevice; checkCudaErrors(hipMemcpy3D(&copyParams)); hipResourceDesc texRes; memset(&texRes, 0, sizeof(hipResourceDesc)); texRes.resType = hipResourceTypeArray; texRes.res.array.array = d_volumeArray; hipTextureDesc texDescr; memset(&texDescr, 0, sizeof(hipTextureDesc)); texDescr.normalizedCoords = true; // access with normalized texture coordinates texDescr.filterMode = hipFilterModeLinear; // linear interpolation texDescr.addressMode[0] = hipAddressModeClamp; // clamp texture coordinates texDescr.addressMode[1] = hipAddressModeClamp; texDescr.addressMode[2] = hipAddressModeClamp; texDescr.readMode = hipReadModeNormalizedFloat; checkCudaErrors(hipCreateTextureObject(&texObject, &texRes, &texDescr, NULL)); // create transfer function texture float4 transferFunc[] = { { 0.0, 0.0, 0.0, 0.0, }, { 1.0, 0.0, 0.0, 1.0, }, { 1.0, 0.5, 0.0, 1.0, }, { 1.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 1.0, 0.0, 1.0, 1.0, }, { 0.0, 0.0, 0.0, 0.0, }, }; hipChannelFormatDesc channelDesc2 = hipCreateChannelDesc<float4>(); hipArray *d_transferFuncArray; checkCudaErrors(hipMallocArray(&d_transferFuncArray, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); 
checkCudaErrors(hipMemcpy2DToArray(d_transferFuncArray, 0, 0, transferFunc, 0, sizeof(transferFunc), 1, hipMemcpyHostToDevice)); memset(&texRes,0,sizeof(hipResourceDesc)); texRes.resType = hipResourceTypeArray; texRes.res.array.array = d_transferFuncArray; memset(&texDescr,0,sizeof(hipTextureDesc)); texDescr.normalizedCoords = true; // access with normalized texture coordinates texDescr.filterMode = hipFilterModeLinear; texDescr.addressMode[0] = hipAddressModeClamp; // wrap texture coordinates texDescr.readMode = hipReadModeElementType; checkCudaErrors(hipCreateTextureObject(&transferTex, &texRes, &texDescr, NULL)); } extern "C" void freeCudaBuffers() { checkCudaErrors(hipDestroyTextureObject(texObject)); checkCudaErrors(hipDestroyTextureObject(transferTex)); checkCudaErrors(hipFreeArray(d_volumeArray)); checkCudaErrors(hipFreeArray(d_transferFuncArray)); } extern "C" void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale) { hipLaunchKernelGGL(( d_render), dim3(gridSize), dim3(blockSize), 0, 0, d_output, imageW, imageH, density, brightness, transferOffset, transferScale, texObject, transferTex); } extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { checkCudaErrors(hipMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix)); } #endif // #ifndef _VOLUMERENDER_KERNEL_CU_
ed860ceb14a95405fb5de1566593f9e66365ddd9.cu
/* * Copyright 1993-2015 NVIDIA Corporation. All rights reserved. * * Please refer to the NVIDIA end user license agreement (EULA) associated * with this source code for terms and conditions that govern your use of * this software. Any use, reproduction, disclosure, or distribution of * this software and related documentation outside the terms of the EULA * is strictly prohibited. * */ // Simple 3D volume renderer #ifndef _VOLUMERENDER_KERNEL_CU_ #define _VOLUMERENDER_KERNEL_CU_ #include <helper_cuda.h> #include <helper_math.h> typedef unsigned int uint; typedef unsigned char uchar; cudaArray *d_volumeArray = 0; cudaArray *d_transferFuncArray; typedef unsigned char VolumeType; //typedef unsigned short VolumeType; cudaTextureObject_t texObject; // For 3D texture cudaTextureObject_t transferTex; // For 1D transfer function texture typedef struct { float4 m[3]; } float3x4; __constant__ float3x4 c_invViewMatrix; // inverse view matrix struct Ray { float3 o; // origin float3 d; // direction }; // intersect ray with a box // http://www.siggraph.org/education/materials/HyperGraph/raytrace/rtinter3.htm __device__ int intersectBox(Ray r, float3 boxmin, float3 boxmax, float *tnear, float *tfar) { // compute intersection of ray with all six bbox planes float3 invR = make_float3(1.0f) / r.d; float3 tbot = invR * (boxmin - r.o); float3 ttop = invR * (boxmax - r.o); // re-order intersections to find smallest and largest on each axis float3 tmin = fminf(ttop, tbot); float3 tmax = fmaxf(ttop, tbot); // find the largest tmin and the smallest tmax float largest_tmin = fmaxf(fmaxf(tmin.x, tmin.y), fmaxf(tmin.x, tmin.z)); float smallest_tmax = fminf(fminf(tmax.x, tmax.y), fminf(tmax.x, tmax.z)); *tnear = largest_tmin; *tfar = smallest_tmax; return smallest_tmax > largest_tmin; } // transform vector by matrix (no translation) __device__ float3 mul(const float3x4 &M, const float3 &v) { float3 r; r.x = dot(v, make_float3(M.m[0])); r.y = dot(v, make_float3(M.m[1])); r.z = dot(v, make_float3(M.m[2])); return r; } // transform vector by matrix with translation __device__ float4 mul(const float3x4 &M, const float4 &v) { float4 r; r.x = dot(v, M.m[0]); r.y = dot(v, M.m[1]); r.z = dot(v, M.m[2]); r.w = 1.0f; return r; } __device__ uint rgbaFloatToInt(float4 rgba) { rgba.x = __saturatef(rgba.x); // clamp to [0.0, 1.0] rgba.y = __saturatef(rgba.y); rgba.z = __saturatef(rgba.z); rgba.w = __saturatef(rgba.w); return (uint(rgba.w*255)<<24) | (uint(rgba.z*255)<<16) | (uint(rgba.y*255)<<8) | uint(rgba.x*255); } __global__ void d_render(uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale, cudaTextureObject_t tex, cudaTextureObject_t transferTex) { const int maxSteps = 500; const float tstep = 0.01f; const float opacityThreshold = 0.95f; const float3 boxMin = make_float3(-1.0f, -1.0f, -1.0f); const float3 boxMax = make_float3(1.0f, 1.0f, 1.0f); uint x = blockIdx.x*blockDim.x + threadIdx.x; uint y = blockIdx.y*blockDim.y + threadIdx.y; if ((x >= imageW) || (y >= imageH)) return; float u = (x / (float) imageW)*2.0f-1.0f; float v = (y / (float) imageH)*2.0f-1.0f; // calculate eye ray in world space Ray eyeRay; eyeRay.o = make_float3(mul(c_invViewMatrix, make_float4(0.0f, 0.0f, 0.0f, 1.0f))); eyeRay.d = normalize(make_float3(u, v, -2.0f)); eyeRay.d = mul(c_invViewMatrix, eyeRay.d); // find intersection with box float tnear, tfar; int hit = intersectBox(eyeRay, boxMin, boxMax, &tnear, &tfar); if (!hit) return; if (tnear < 0.0f) tnear = 0.0f; // clamp to near plane // 
march along ray from front to back, accumulating color float4 sum = make_float4(0.0f); float t = tnear; float3 pos = eyeRay.o + eyeRay.d*tnear; float3 step = eyeRay.d*tstep; for (int i=0; i<maxSteps; i++) { // read from 3D texture // remap position to [0, 1] coordinates float sample = tex3D<float>(tex, pos.x*0.5f+0.5f, pos.y*0.5f+0.5f, pos.z*0.5f+0.5f); //sample *= 64.0f; // scale for 10-bit data // lookup in transfer function texture float4 col = tex1D<float4>(transferTex, (sample-transferOffset)*transferScale); col.w *= density; // "under" operator for back-to-front blending //sum = lerp(sum, col, col.w); // pre-multiply alpha col.x *= col.w; col.y *= col.w; col.z *= col.w; // "over" operator for front-to-back blending sum = sum + col*(1.0f - sum.w); // exit early if opaque if (sum.w > opacityThreshold) break; t += tstep; if (t > tfar) break; pos += step; } sum *= brightness; // write output color d_output[y*imageW + x] = rgbaFloatToInt(sum); } extern "C" void setTextureFilterMode(bool bLinearFilter) { if (texObject) { checkCudaErrors(cudaDestroyTextureObject(texObject)); } cudaResourceDesc texRes; memset(&texRes,0,sizeof(cudaResourceDesc)); texRes.resType = cudaResourceTypeArray; texRes.res.array.array = d_volumeArray; cudaTextureDesc texDescr; memset(&texDescr,0,sizeof(cudaTextureDesc)); texDescr.normalizedCoords = true; texDescr.filterMode = bLinearFilter ? cudaFilterModeLinear : cudaFilterModePoint; texDescr.addressMode[0] = cudaAddressModeWrap; texDescr.addressMode[1] = cudaAddressModeWrap; texDescr.addressMode[2] = cudaAddressModeWrap; texDescr.readMode = cudaReadModeNormalizedFloat; checkCudaErrors(cudaCreateTextureObject(&texObject, &texRes, &texDescr, NULL)); } extern "C" void initCuda(void *h_volume, cudaExtent volumeSize) { // create 3D array cudaChannelFormatDesc channelDesc = cudaCreateChannelDesc<VolumeType>(); checkCudaErrors(cudaMalloc3DArray(&d_volumeArray, &channelDesc, volumeSize)); // copy data to 3D array cudaMemcpy3DParms copyParams = {0}; copyParams.srcPtr = make_cudaPitchedPtr(h_volume, volumeSize.width*sizeof(VolumeType), volumeSize.width, volumeSize.height); copyParams.dstArray = d_volumeArray; copyParams.extent = volumeSize; copyParams.kind = cudaMemcpyHostToDevice; checkCudaErrors(cudaMemcpy3D(&copyParams)); cudaResourceDesc texRes; memset(&texRes, 0, sizeof(cudaResourceDesc)); texRes.resType = cudaResourceTypeArray; texRes.res.array.array = d_volumeArray; cudaTextureDesc texDescr; memset(&texDescr, 0, sizeof(cudaTextureDesc)); texDescr.normalizedCoords = true; // access with normalized texture coordinates texDescr.filterMode = cudaFilterModeLinear; // linear interpolation texDescr.addressMode[0] = cudaAddressModeClamp; // clamp texture coordinates texDescr.addressMode[1] = cudaAddressModeClamp; texDescr.addressMode[2] = cudaAddressModeClamp; texDescr.readMode = cudaReadModeNormalizedFloat; checkCudaErrors(cudaCreateTextureObject(&texObject, &texRes, &texDescr, NULL)); // create transfer function texture float4 transferFunc[] = { { 0.0, 0.0, 0.0, 0.0, }, { 1.0, 0.0, 0.0, 1.0, }, { 1.0, 0.5, 0.0, 1.0, }, { 1.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 0.0, 1.0, }, { 0.0, 1.0, 1.0, 1.0, }, { 0.0, 0.0, 1.0, 1.0, }, { 1.0, 0.0, 1.0, 1.0, }, { 0.0, 0.0, 0.0, 0.0, }, }; cudaChannelFormatDesc channelDesc2 = cudaCreateChannelDesc<float4>(); cudaArray *d_transferFuncArray; checkCudaErrors(cudaMallocArray(&d_transferFuncArray, &channelDesc2, sizeof(transferFunc)/sizeof(float4), 1)); checkCudaErrors(cudaMemcpy2DToArray(d_transferFuncArray, 0, 0, transferFunc, 0, 
sizeof(transferFunc), 1, cudaMemcpyHostToDevice)); memset(&texRes,0,sizeof(cudaResourceDesc)); texRes.resType = cudaResourceTypeArray; texRes.res.array.array = d_transferFuncArray; memset(&texDescr,0,sizeof(cudaTextureDesc)); texDescr.normalizedCoords = true; // access with normalized texture coordinates texDescr.filterMode = cudaFilterModeLinear; texDescr.addressMode[0] = cudaAddressModeClamp; // wrap texture coordinates texDescr.readMode = cudaReadModeElementType; checkCudaErrors(cudaCreateTextureObject(&transferTex, &texRes, &texDescr, NULL)); } extern "C" void freeCudaBuffers() { checkCudaErrors(cudaDestroyTextureObject(texObject)); checkCudaErrors(cudaDestroyTextureObject(transferTex)); checkCudaErrors(cudaFreeArray(d_volumeArray)); checkCudaErrors(cudaFreeArray(d_transferFuncArray)); } extern "C" void render_kernel(dim3 gridSize, dim3 blockSize, uint *d_output, uint imageW, uint imageH, float density, float brightness, float transferOffset, float transferScale) { d_render<<<gridSize, blockSize>>>(d_output, imageW, imageH, density, brightness, transferOffset, transferScale, texObject, transferTex); } extern "C" void copyInvViewMatrix(float *invViewMatrix, size_t sizeofMatrix) { checkCudaErrors(cudaMemcpyToSymbol(c_invViewMatrix, invViewMatrix, sizeofMatrix)); } #endif // #ifndef _VOLUMERENDER_KERNEL_CU_
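The volume renderer exercises the texture-object path: cudaTextureObject_t, cudaResourceDesc, cudaTextureDesc, cudaCreateTextureObject, cudaMallocArray, cudaMemcpy2DToArray and cudaMemcpyToSymbol all map to hip* types and calls with the same shape. Below is a reduced sketch of the 1-D transfer-function texture setup under HIP; the makeTransferTex helper and its arguments are illustrative, and error checking is omitted for brevity.

#include <hip/hip_runtime.h>
#include <cstring>

// Builds a 1-D float4 texture object from host data, mirroring the
// transfer-function setup in initCuda() above (illustrative helper).
hipTextureObject_t makeTransferTex(const float4* func, int n)
{
    hipChannelFormatDesc desc = hipCreateChannelDesc<float4>();
    hipArray* arr = nullptr;
    hipMallocArray(&arr, &desc, n, 1);
    hipMemcpy2DToArray(arr, 0, 0, func, 0, n * sizeof(float4), 1,
                       hipMemcpyHostToDevice);

    hipResourceDesc resDesc;
    memset(&resDesc, 0, sizeof(resDesc));
    resDesc.resType = hipResourceTypeArray;
    resDesc.res.array.array = arr;

    hipTextureDesc texDesc;
    memset(&texDesc, 0, sizeof(texDesc));
    texDesc.normalizedCoords = true;                 // access with [0, 1] coordinates
    texDesc.filterMode = hipFilterModeLinear;        // linear interpolation
    texDesc.addressMode[0] = hipAddressModeClamp;    // clamp texture coordinates
    texDesc.readMode = hipReadModeElementType;

    hipTextureObject_t tex = 0;
    hipCreateTextureObject(&tex, &resDesc, &texDesc, NULL);
    return tex;
}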
227bbd58a98238c44d98dd8447ad36e7165828e1.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <algorithm> #include <vector> #include "lite/backends/cuda/math/transpose.h" #include "lite/core/op_registry.h" #include "lite/kernels/cuda/search_grnn_compute.h" namespace paddle { namespace lite { namespace kernels { namespace cuda { using Tensor = lite::Tensor; template <typename Dtype> __global__ void trans_map2out( Dtype* output, const Dtype* input, const int* map, int count, int lastdim) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < count) { int seq = tid / lastdim; output[map[seq] * lastdim + tid % lastdim] = input[tid]; } } template <typename Dtype> __global__ void trans_map2in( Dtype* output, const Dtype* input, const int* map, int count, int lastdim) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < count) { int seq = tid / lastdim; output[tid] = input[map[seq] * lastdim + tid % lastdim]; } } template <typename Dtype> void trans_map2out_cfunc(const Dtype* input, Dtype* output, int word_size, int seq_sum, hipStream_t stream, int* dev_map_vec) { int count = seq_sum * word_size; int block_dim = count; int grid_dim = 1; if (count > 1024) { block_dim = 256; grid_dim = (count + block_dim - 1) / block_dim; } hipLaunchKernelGGL(( trans_map2out), dim3(grid_dim), dim3(block_dim), 0, stream, output, input, dev_map_vec, count, word_size); } template <typename Dtype> void trans_map2in_cfunc(const Dtype* input, Dtype* output, int hidden_size, int seq_sum, hipStream_t stream, int* dev_map_vec) { int count = seq_sum * hidden_size; int block_dim = count; int grid_dim = 1; if (count > 1024) { block_dim = 256; grid_dim = (count + block_dim - 1) / block_dim; } hipLaunchKernelGGL(( trans_map2in), dim3(grid_dim), dim3(block_dim), 0, stream, output, input, dev_map_vec, count, hidden_size); } template <typename Dtype> void SeqSortedseqTranseUtil::seq_2_sorted_seq(const Dtype* input, Dtype* output, int word_size, hipStream_t stream) { int seq_sum = _map_vec.size(); trans_map2out_cfunc(input, output, word_size, seq_sum, stream, _dev_map_vec); } template <typename Dtype> void SeqSortedseqTranseUtil::sorted_seq_2_seq(const Dtype* input, Dtype* output, int hidden_size, hipStream_t stream) { int seq_sum = _map_vec.size(); trans_map2in_cfunc(input, output, hidden_size, seq_sum, stream, _dev_map_vec); } bool SeqSortedseqTranseUtil::get_sorted_map(const std::vector<int>& offset_vec, hipStream_t stream_id) { int batch_size = offset_vec.size() - 1; int word_sum = offset_vec[offset_vec.size() - 1]; std::vector<int> length_vec(batch_size); _length_index.resize(batch_size); int emit_length = 0; if (batch_size == 1) { emit_length = offset_vec[1] - offset_vec[0]; _emit_offset_vec.resize(emit_length + 1); for (int i = 0; i <= emit_length; ++i) { _emit_offset_vec[i] = i; } return false; } int max_len = 0; for (int i = 0; i < offset_vec.size() - 1; ++i) { int len = offset_vec[i + 1] - offset_vec[i]; max_len = 
max_len > len ? max_len : len; length_vec[i] = len; _length_index[i] = i; } emit_length = max_len; if (max_len == 1) { _emit_offset_vec.resize(2); _emit_offset_vec[0] = 0; _emit_offset_vec[1] = emit_length * batch_size; return false; } std::sort(_length_index.begin(), _length_index.end(), [&length_vec](int i1, int i2) { return length_vec[i1] > length_vec[i2]; }); _emit_offset_vec.resize(max_len + 1); _map_vec.resize(word_sum); if (word_sum > _dev_map_vec_length) { if (_dev_map_vec != nullptr) { TargetWrapperCuda::Free(static_cast<void*>(_dev_map_vec)); } _dev_map_vec = static_cast<int*>(TargetWrapperCuda::Malloc(sizeof(int) * word_sum)); _dev_map_vec_length = word_sum; } int target_word_id = 0; std::vector<int> length_vec_cnt = length_vec; int last_batch_size = batch_size; for (int word_id_in_seq = 0; word_id_in_seq < max_len; word_id_in_seq++) { _emit_offset_vec[word_id_in_seq] = target_word_id; for (int batch_id = 0; batch_id < last_batch_size; batch_id++) { int old_batch_id = _length_index[batch_id]; if (length_vec_cnt[old_batch_id] > 0) { int inner_word_id_in_seq = word_id_in_seq; if (_is_reverse) { inner_word_id_in_seq = length_vec[old_batch_id] - 1 - word_id_in_seq; } int old_word_id = offset_vec[old_batch_id] + inner_word_id_in_seq; _map_vec[old_word_id] = target_word_id; length_vec_cnt[old_batch_id]--; target_word_id++; } else { last_batch_size--; break; } } } TargetWrapperCuda::MemcpyAsync(_dev_map_vec, _map_vec.data(), sizeof(int) * word_sum, IoDirection::HtoD, stream_id); _emit_offset_vec[max_len] = word_sum; _emit_length = emit_length; return true; } template <typename Dtype> __global__ void transpose_2d(Dtype* output, const Dtype* input, int m, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < m * n) { int i = tid / n; int j = tid % m; output[tid] = input[j * n + i]; } } void SearchGrnnCompute::WeightsPreprocess() { auto& param = this->Param<param_t>(); auto& context = this->ctx_->template As<CUDAContext>(); auto stream = context.exec_stream(); DDim idims = param.wi->dims(); DDim hdims = param.wh->dims(); _wi.Resize({idims[2], idims[0], idims[1]}); _wh.Resize({hdims[2], hdims[0], hdims[1]}); lite::cuda::math::Transpose<float> trans; trans.transpose(_wi.mutable_data<float>(TARGET(kCUDA)), param.wi->data<float>(), idims.Vectorize(), {2, 0, 1}, &stream); trans.transpose(_wh.mutable_data<float>(TARGET(kCUDA)) + hdims[1] * hdims[2], param.wh->data<float>() + hdims[1] * hdims[2], {hdims[0] - 1, hdims[1], hdims[2]}, {2, 0, 1}, &stream); trans.transpose(_wh.mutable_data<float>(TARGET(kCUDA)), param.wh->data<float>(), {hdims[1], hdims[2]}, {1, 0}, &stream); // int thread_num = 512; // int block_num = (hdims[1] * hdims[2] + thread_num - 1) / thread_num; // transpose_2d<<<block_num, thread_num, 0, stream>>>( // _wh.mutable_data<float>(TARGET(kCUDA)), // param.wh->data<float>(), // hdims[1], // hdims[2]); } void SearchGrnnCompute::PrepareForRun() { auto& param = this->Param<param_t>(); auto& context = this->ctx_->template As<CUDAContext>(); auto stream = context.exec_stream(); gemm_impl_.reset(new lite::cuda::math::Gemm<float, float>); _seq_util = SeqSortedseqTranseUtil(); WeightsPreprocess(); int hidden_size = param.num_hidden; int word_size = param.num_input; int weights_h2h_size = hidden_size * hidden_size * 3; int weights_i2h_size = hidden_size * word_size * 3; lite::Tensor temp_weights_h2h_ori; lite::Tensor temp_weights_h2h_swarp; temp_weights_h2h_ori.Resize({weights_h2h_size}); temp_weights_h2h_swarp.Resize({weights_h2h_size}); 
TargetWrapperCuda::MemcpyAsync(temp_weights_h2h_ori.mutable_data<float>(), _wh.data<float>(), sizeof(float) * weights_h2h_size, IoDirection::DtoH, stream); hipStreamSynchronize(stream); float* temp_tensor_ptr = temp_weights_h2h_swarp.mutable_data<float>(); memcpy(temp_tensor_ptr, temp_weights_h2h_ori.data<float>(), sizeof(float) * hidden_size * hidden_size); float* rz_temp_tensor_ptr = temp_tensor_ptr + hidden_size * hidden_size; const float* rz_weights_tensor_ptr = temp_weights_h2h_ori.data<float>() + hidden_size * hidden_size; for (int row = 0; row < hidden_size; row++) { for (int block = 0; block < 2; block++) { int block_offset = block * hidden_size; for (int cow = 0; cow < hidden_size; cow++) { rz_temp_tensor_ptr[block * hidden_size * hidden_size + row * hidden_size + cow] = rz_weights_tensor_ptr[row * (2 * hidden_size) + cow + block_offset]; } } } float* orz_temp_tensor_ptr = temp_tensor_ptr; float* orz_weights_tensor_ptr = temp_weights_h2h_ori.mutable_data<float>(); for (int row = 0; row < hidden_size; row++) { for (int block = 0; block < 3; block++) { int block_offset = block * hidden_size; for (int cow = 0; cow < hidden_size; cow++) { orz_weights_tensor_ptr[row * (3 * hidden_size) + cow + block_offset] = orz_temp_tensor_ptr[block * hidden_size * hidden_size + row * hidden_size + cow]; } } } _temp_weights_h2h.Resize({weights_h2h_size}); TargetWrapperCuda::MemcpyAsync( _temp_weights_h2h.mutable_data<float>(TARGET(kCUDA)), temp_weights_h2h_ori.data<float>(), sizeof(float) * weights_h2h_size, IoDirection::HtoD, stream); hipStreamSynchronize(stream); } template <typename Dtype> static inline __device__ Dtype Sigmoid(const Dtype a) { return static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-a)); } template <typename Dtype> static inline __device__ Dtype Tanh(const Dtype a) { Dtype tmp = static_cast<Dtype>(-2.0) * a; return (static_cast<Dtype>(2.0) / (static_cast<Dtype>(1.0) + expf(tmp))) - static_cast<Dtype>(1.0); } template <typename Dtype> __global__ void cal_cudnn_kernel(const Dtype* w_x_r, const Dtype* w_x_z, const Dtype* w_x_o, const Dtype* w_h_r, const Dtype* w_h_z, const Dtype* w_h_o, int hidden_size, int batch_size, Dtype* output, const Dtype* hidden_pre) { const int thread_id = blockIdx.x * blockDim.x + threadIdx.x; const int batch_id = thread_id / hidden_size; const int index = thread_id % hidden_size; if (index < hidden_size && batch_id < batch_size) { int w_base_index = batch_id * hidden_size * 3 + index; int h_base_index = batch_id * hidden_size + index; Dtype hidden_pre_value = hidden_pre[h_base_index]; Dtype r = Sigmoid(w_x_r[w_base_index] + w_h_r[w_base_index]); Dtype z = Sigmoid(w_x_z[w_base_index] + w_h_z[w_base_index]); Dtype _h = Tanh(w_x_o[w_base_index] + w_h_o[w_base_index] * r); output[h_base_index] = (static_cast<Dtype>(1.0) - z) * _h + z * hidden_pre_value; } } void SearchGrnnCompute::Run() { auto& param = this->Param<param_t>(); auto& context = this->ctx_->template As<CUDAContext>(); auto stream = context.exec_stream(); auto* x = param.x; LoD offset_vec_vec = x->lod(); std::vector<int> offset(offset_vec_vec[offset_vec_vec.size() - 1].size()); for (size_t i = 0; i < offset_vec_vec[offset_vec_vec.size() - 1].size(); ++i) { offset[i] = static_cast<int>(offset_vec_vec[offset_vec_vec.size() - 1][i]); } const float* x_data = x->data<float>(); auto* dout = param.out; std::vector<int64_t> out_dims_vec{x->dims()[0], param.num_hidden}; dout->Resize(out_dims_vec); float* dout_data = dout->mutable_data<float>(TARGET(kCUDA)); auto* wi = &_wi; auto* wh = &_wh; const 
float* weights_i2h = wi->data<float>(); const float* weights_h2h = wh->data<float>(); int batch_size = offset.size() - 1; int seq_sum = x->dims()[0]; bool is_batched = offset.size() > 2; int hidden_size = param.num_hidden; int word_size = param.num_input; int o_offset = 0; int r_offset = 1; int z_offset = 2; is_batched = _seq_util.get_sorted_map(offset, stream); std::vector<int> emit_offset_vec = _seq_util.get_emit_offset_vec(); int emit_length = emit_offset_vec.size() - 1; if (is_batched) { std::vector<int64_t> seq_shape{1, 1, seq_sum, word_size}; _temp_tensor_in.Resize(seq_shape); std::vector<int64_t> seq_out_shape{1, 1, seq_sum, hidden_size}; _temp_tensor_out.Resize(seq_out_shape); _seq_util.seq_2_sorted_seq( x_data, _temp_tensor_in.mutable_data<float>(TARGET(kCUDA)), word_size, stream); x_data = _temp_tensor_in.data<float>(); dout_data = _temp_tensor_out.mutable_data<float>(TARGET(kCUDA)); } std::vector<int64_t> shape_wx({seq_sum, 1, 3, hidden_size}); _temp_wx.Resize(shape_wx); std::vector<int64_t> shape_wh({1, batch_size, 3, hidden_size}); _temp_wh.Resize(shape_wh); gemm_impl_->init(false, false, seq_sum, 3 * hidden_size, word_size, &context); gemm_impl_->run(1.0f, 0.0f, x_data, weights_i2h, _temp_wx.mutable_data<float>(TARGET(kCUDA)), &context); std::vector<int64_t> shape_zero({batch_size * hidden_size}); _temp_zero.Resize(shape_zero); TargetWrapperCuda::MemsetAsync(_temp_zero.mutable_data<float>(TARGET(kCUDA)), 0, sizeof(float) * batch_size * hidden_size, stream); const float* h = _temp_zero.data<float>(); for (int word_id = 0; word_id < emit_length; word_id++) { int real_word_id = word_id; int last_word_id = word_id - 1; int emit_word_id_start = emit_offset_vec[real_word_id]; int emit_word_id_end = emit_offset_vec[real_word_id + 1]; int emit_word_length = emit_word_id_end - emit_word_id_start; const float* hidden_in; float* hidden_out = dout_data + emit_offset_vec[real_word_id] * hidden_size; if (word_id == 0) { hidden_in = h; } else { hidden_in = dout_data + emit_offset_vec[last_word_id] * hidden_size; } float* w_x_r = _temp_wx.mutable_data<float>(TARGET(kCUDA)) + r_offset * hidden_size + emit_word_id_start * hidden_size * 3; float* w_x_z = _temp_wx.mutable_data<float>(TARGET(kCUDA)) + z_offset * hidden_size + emit_word_id_start * hidden_size * 3; float* w_x_o = _temp_wx.mutable_data<float>(TARGET(kCUDA)) + o_offset * hidden_size + emit_word_id_start * hidden_size * 3; float* w_h_r = _temp_wh.mutable_data<float>(TARGET(kCUDA)) + r_offset * hidden_size; float* w_h_z = _temp_wh.mutable_data<float>(TARGET(kCUDA)) + z_offset * hidden_size; float* w_h_o = _temp_wh.mutable_data<float>(TARGET(kCUDA)) + o_offset * hidden_size; gemm_impl_->init( false, false, emit_word_length, 3 * hidden_size, hidden_size, &context); gemm_impl_->run(1.0f, 0.0f, hidden_in, _temp_weights_h2h.data<float>(), _temp_wh.mutable_data<float>(TARGET(kCUDA)), &context); const float* w_o = weights_h2h; const int block_dim = 512; const int grid_dim = (emit_word_length * hidden_size + block_dim - 1) / block_dim; hipLaunchKernelGGL(( cal_cudnn_kernel), dim3(grid_dim), dim3(block_dim), 0, stream, w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_h_o, hidden_size, emit_word_length, hidden_out, hidden_in); } if (is_batched) { _seq_util.sorted_seq_2_seq(_temp_tensor_out.data<float>(), dout->mutable_data<float>(TARGET(kCUDA)), hidden_size, stream); } dout->set_lod(x->lod()); } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle REGISTER_LITE_KERNEL(search_grnn, kCUDA, kFloat, kNCHW, 
paddle::lite::kernels::cuda::SearchGrnnCompute, def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindInput("Wi", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindInput("Wh", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindOutput("tmp_buffer", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindOutput("idx_sorted_by_width", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindOutput("layout_input", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .Finalize();
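Note on the file above: the core of cal_cudnn_kernel is the standard GRU cell update applied per hidden unit. A minimal host-side mirror of that per-element math, with hypothetical helper names not taken from the file, shown only to make the gate equations explicit:

// Hypothetical scalar reference for one hidden unit; mirrors cal_cudnn_kernel above.
#include <cmath>

inline float sigmoidf(float a) { return 1.0f / (1.0f + std::exp(-a)); }

// w_x_* come from the input GEMM (_temp_wx), w_h_* from the recurrent GEMM (_temp_wh).
inline float gru_update(float w_x_r, float w_x_z, float w_x_o,
                        float w_h_r, float w_h_z, float w_h_o,
                        float hidden_pre) {
  float r = sigmoidf(w_x_r + w_h_r);          // reset gate
  float z = sigmoidf(w_x_z + w_h_z);          // update gate
  float h = std::tanh(w_x_o + w_h_o * r);     // candidate hidden state
  return (1.0f - z) * h + z * hidden_pre;     // blend with previous hidden state
}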
227bbd58a98238c44d98dd8447ad36e7165828e1.cu
/* Copyright (c) 2019 PaddlePaddle Authors. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. */ #pragma once #include <algorithm> #include <vector> #include "lite/backends/cuda/math/transpose.h" #include "lite/core/op_registry.h" #include "lite/kernels/cuda/search_grnn_compute.h" namespace paddle { namespace lite { namespace kernels { namespace cuda { using Tensor = lite::Tensor; template <typename Dtype> __global__ void trans_map2out( Dtype* output, const Dtype* input, const int* map, int count, int lastdim) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < count) { int seq = tid / lastdim; output[map[seq] * lastdim + tid % lastdim] = input[tid]; } } template <typename Dtype> __global__ void trans_map2in( Dtype* output, const Dtype* input, const int* map, int count, int lastdim) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < count) { int seq = tid / lastdim; output[tid] = input[map[seq] * lastdim + tid % lastdim]; } } template <typename Dtype> void trans_map2out_cfunc(const Dtype* input, Dtype* output, int word_size, int seq_sum, cudaStream_t stream, int* dev_map_vec) { int count = seq_sum * word_size; int block_dim = count; int grid_dim = 1; if (count > 1024) { block_dim = 256; grid_dim = (count + block_dim - 1) / block_dim; } trans_map2out<<<grid_dim, block_dim, 0, stream>>>( output, input, dev_map_vec, count, word_size); } template <typename Dtype> void trans_map2in_cfunc(const Dtype* input, Dtype* output, int hidden_size, int seq_sum, cudaStream_t stream, int* dev_map_vec) { int count = seq_sum * hidden_size; int block_dim = count; int grid_dim = 1; if (count > 1024) { block_dim = 256; grid_dim = (count + block_dim - 1) / block_dim; } trans_map2in<<<grid_dim, block_dim, 0, stream>>>( output, input, dev_map_vec, count, hidden_size); } template <typename Dtype> void SeqSortedseqTranseUtil::seq_2_sorted_seq(const Dtype* input, Dtype* output, int word_size, cudaStream_t stream) { int seq_sum = _map_vec.size(); trans_map2out_cfunc(input, output, word_size, seq_sum, stream, _dev_map_vec); } template <typename Dtype> void SeqSortedseqTranseUtil::sorted_seq_2_seq(const Dtype* input, Dtype* output, int hidden_size, cudaStream_t stream) { int seq_sum = _map_vec.size(); trans_map2in_cfunc(input, output, hidden_size, seq_sum, stream, _dev_map_vec); } bool SeqSortedseqTranseUtil::get_sorted_map(const std::vector<int>& offset_vec, cudaStream_t stream_id) { int batch_size = offset_vec.size() - 1; int word_sum = offset_vec[offset_vec.size() - 1]; std::vector<int> length_vec(batch_size); _length_index.resize(batch_size); int emit_length = 0; if (batch_size == 1) { emit_length = offset_vec[1] - offset_vec[0]; _emit_offset_vec.resize(emit_length + 1); for (int i = 0; i <= emit_length; ++i) { _emit_offset_vec[i] = i; } return false; } int max_len = 0; for (int i = 0; i < offset_vec.size() - 1; ++i) { int len = offset_vec[i + 1] - offset_vec[i]; max_len = max_len > len ? 
max_len : len; length_vec[i] = len; _length_index[i] = i; } emit_length = max_len; if (max_len == 1) { _emit_offset_vec.resize(2); _emit_offset_vec[0] = 0; _emit_offset_vec[1] = emit_length * batch_size; return false; } std::sort(_length_index.begin(), _length_index.end(), [&length_vec](int i1, int i2) { return length_vec[i1] > length_vec[i2]; }); _emit_offset_vec.resize(max_len + 1); _map_vec.resize(word_sum); if (word_sum > _dev_map_vec_length) { if (_dev_map_vec != nullptr) { TargetWrapperCuda::Free(static_cast<void*>(_dev_map_vec)); } _dev_map_vec = static_cast<int*>(TargetWrapperCuda::Malloc(sizeof(int) * word_sum)); _dev_map_vec_length = word_sum; } int target_word_id = 0; std::vector<int> length_vec_cnt = length_vec; int last_batch_size = batch_size; for (int word_id_in_seq = 0; word_id_in_seq < max_len; word_id_in_seq++) { _emit_offset_vec[word_id_in_seq] = target_word_id; for (int batch_id = 0; batch_id < last_batch_size; batch_id++) { int old_batch_id = _length_index[batch_id]; if (length_vec_cnt[old_batch_id] > 0) { int inner_word_id_in_seq = word_id_in_seq; if (_is_reverse) { inner_word_id_in_seq = length_vec[old_batch_id] - 1 - word_id_in_seq; } int old_word_id = offset_vec[old_batch_id] + inner_word_id_in_seq; _map_vec[old_word_id] = target_word_id; length_vec_cnt[old_batch_id]--; target_word_id++; } else { last_batch_size--; break; } } } TargetWrapperCuda::MemcpyAsync(_dev_map_vec, _map_vec.data(), sizeof(int) * word_sum, IoDirection::HtoD, stream_id); _emit_offset_vec[max_len] = word_sum; _emit_length = emit_length; return true; } template <typename Dtype> __global__ void transpose_2d(Dtype* output, const Dtype* input, int m, int n) { int tid = blockIdx.x * blockDim.x + threadIdx.x; if (tid < m * n) { int i = tid / n; int j = tid % m; output[tid] = input[j * n + i]; } } void SearchGrnnCompute::WeightsPreprocess() { auto& param = this->Param<param_t>(); auto& context = this->ctx_->template As<CUDAContext>(); auto stream = context.exec_stream(); DDim idims = param.wi->dims(); DDim hdims = param.wh->dims(); _wi.Resize({idims[2], idims[0], idims[1]}); _wh.Resize({hdims[2], hdims[0], hdims[1]}); lite::cuda::math::Transpose<float> trans; trans.transpose(_wi.mutable_data<float>(TARGET(kCUDA)), param.wi->data<float>(), idims.Vectorize(), {2, 0, 1}, &stream); trans.transpose(_wh.mutable_data<float>(TARGET(kCUDA)) + hdims[1] * hdims[2], param.wh->data<float>() + hdims[1] * hdims[2], {hdims[0] - 1, hdims[1], hdims[2]}, {2, 0, 1}, &stream); trans.transpose(_wh.mutable_data<float>(TARGET(kCUDA)), param.wh->data<float>(), {hdims[1], hdims[2]}, {1, 0}, &stream); // int thread_num = 512; // int block_num = (hdims[1] * hdims[2] + thread_num - 1) / thread_num; // transpose_2d<<<block_num, thread_num, 0, stream>>>( // _wh.mutable_data<float>(TARGET(kCUDA)), // param.wh->data<float>(), // hdims[1], // hdims[2]); } void SearchGrnnCompute::PrepareForRun() { auto& param = this->Param<param_t>(); auto& context = this->ctx_->template As<CUDAContext>(); auto stream = context.exec_stream(); gemm_impl_.reset(new lite::cuda::math::Gemm<float, float>); _seq_util = SeqSortedseqTranseUtil(); WeightsPreprocess(); int hidden_size = param.num_hidden; int word_size = param.num_input; int weights_h2h_size = hidden_size * hidden_size * 3; int weights_i2h_size = hidden_size * word_size * 3; lite::Tensor temp_weights_h2h_ori; lite::Tensor temp_weights_h2h_swarp; temp_weights_h2h_ori.Resize({weights_h2h_size}); temp_weights_h2h_swarp.Resize({weights_h2h_size}); 
TargetWrapperCuda::MemcpyAsync(temp_weights_h2h_ori.mutable_data<float>(), _wh.data<float>(), sizeof(float) * weights_h2h_size, IoDirection::DtoH, stream); cudaStreamSynchronize(stream); float* temp_tensor_ptr = temp_weights_h2h_swarp.mutable_data<float>(); memcpy(temp_tensor_ptr, temp_weights_h2h_ori.data<float>(), sizeof(float) * hidden_size * hidden_size); float* rz_temp_tensor_ptr = temp_tensor_ptr + hidden_size * hidden_size; const float* rz_weights_tensor_ptr = temp_weights_h2h_ori.data<float>() + hidden_size * hidden_size; for (int row = 0; row < hidden_size; row++) { for (int block = 0; block < 2; block++) { int block_offset = block * hidden_size; for (int cow = 0; cow < hidden_size; cow++) { rz_temp_tensor_ptr[block * hidden_size * hidden_size + row * hidden_size + cow] = rz_weights_tensor_ptr[row * (2 * hidden_size) + cow + block_offset]; } } } float* orz_temp_tensor_ptr = temp_tensor_ptr; float* orz_weights_tensor_ptr = temp_weights_h2h_ori.mutable_data<float>(); for (int row = 0; row < hidden_size; row++) { for (int block = 0; block < 3; block++) { int block_offset = block * hidden_size; for (int cow = 0; cow < hidden_size; cow++) { orz_weights_tensor_ptr[row * (3 * hidden_size) + cow + block_offset] = orz_temp_tensor_ptr[block * hidden_size * hidden_size + row * hidden_size + cow]; } } } _temp_weights_h2h.Resize({weights_h2h_size}); TargetWrapperCuda::MemcpyAsync( _temp_weights_h2h.mutable_data<float>(TARGET(kCUDA)), temp_weights_h2h_ori.data<float>(), sizeof(float) * weights_h2h_size, IoDirection::HtoD, stream); cudaStreamSynchronize(stream); } template <typename Dtype> static inline __device__ Dtype Sigmoid(const Dtype a) { return static_cast<Dtype>(1.0) / (static_cast<Dtype>(1.0) + expf(-a)); } template <typename Dtype> static inline __device__ Dtype Tanh(const Dtype a) { Dtype tmp = static_cast<Dtype>(-2.0) * a; return (static_cast<Dtype>(2.0) / (static_cast<Dtype>(1.0) + expf(tmp))) - static_cast<Dtype>(1.0); } template <typename Dtype> __global__ void cal_cudnn_kernel(const Dtype* w_x_r, const Dtype* w_x_z, const Dtype* w_x_o, const Dtype* w_h_r, const Dtype* w_h_z, const Dtype* w_h_o, int hidden_size, int batch_size, Dtype* output, const Dtype* hidden_pre) { const int thread_id = blockIdx.x * blockDim.x + threadIdx.x; const int batch_id = thread_id / hidden_size; const int index = thread_id % hidden_size; if (index < hidden_size && batch_id < batch_size) { int w_base_index = batch_id * hidden_size * 3 + index; int h_base_index = batch_id * hidden_size + index; Dtype hidden_pre_value = hidden_pre[h_base_index]; Dtype r = Sigmoid(w_x_r[w_base_index] + w_h_r[w_base_index]); Dtype z = Sigmoid(w_x_z[w_base_index] + w_h_z[w_base_index]); Dtype _h = Tanh(w_x_o[w_base_index] + w_h_o[w_base_index] * r); output[h_base_index] = (static_cast<Dtype>(1.0) - z) * _h + z * hidden_pre_value; } } void SearchGrnnCompute::Run() { auto& param = this->Param<param_t>(); auto& context = this->ctx_->template As<CUDAContext>(); auto stream = context.exec_stream(); auto* x = param.x; LoD offset_vec_vec = x->lod(); std::vector<int> offset(offset_vec_vec[offset_vec_vec.size() - 1].size()); for (size_t i = 0; i < offset_vec_vec[offset_vec_vec.size() - 1].size(); ++i) { offset[i] = static_cast<int>(offset_vec_vec[offset_vec_vec.size() - 1][i]); } const float* x_data = x->data<float>(); auto* dout = param.out; std::vector<int64_t> out_dims_vec{x->dims()[0], param.num_hidden}; dout->Resize(out_dims_vec); float* dout_data = dout->mutable_data<float>(TARGET(kCUDA)); auto* wi = &_wi; auto* wh = &_wh; 
const float* weights_i2h = wi->data<float>(); const float* weights_h2h = wh->data<float>(); int batch_size = offset.size() - 1; int seq_sum = x->dims()[0]; bool is_batched = offset.size() > 2; int hidden_size = param.num_hidden; int word_size = param.num_input; int o_offset = 0; int r_offset = 1; int z_offset = 2; is_batched = _seq_util.get_sorted_map(offset, stream); std::vector<int> emit_offset_vec = _seq_util.get_emit_offset_vec(); int emit_length = emit_offset_vec.size() - 1; if (is_batched) { std::vector<int64_t> seq_shape{1, 1, seq_sum, word_size}; _temp_tensor_in.Resize(seq_shape); std::vector<int64_t> seq_out_shape{1, 1, seq_sum, hidden_size}; _temp_tensor_out.Resize(seq_out_shape); _seq_util.seq_2_sorted_seq( x_data, _temp_tensor_in.mutable_data<float>(TARGET(kCUDA)), word_size, stream); x_data = _temp_tensor_in.data<float>(); dout_data = _temp_tensor_out.mutable_data<float>(TARGET(kCUDA)); } std::vector<int64_t> shape_wx({seq_sum, 1, 3, hidden_size}); _temp_wx.Resize(shape_wx); std::vector<int64_t> shape_wh({1, batch_size, 3, hidden_size}); _temp_wh.Resize(shape_wh); gemm_impl_->init(false, false, seq_sum, 3 * hidden_size, word_size, &context); gemm_impl_->run(1.0f, 0.0f, x_data, weights_i2h, _temp_wx.mutable_data<float>(TARGET(kCUDA)), &context); std::vector<int64_t> shape_zero({batch_size * hidden_size}); _temp_zero.Resize(shape_zero); TargetWrapperCuda::MemsetAsync(_temp_zero.mutable_data<float>(TARGET(kCUDA)), 0, sizeof(float) * batch_size * hidden_size, stream); const float* h = _temp_zero.data<float>(); for (int word_id = 0; word_id < emit_length; word_id++) { int real_word_id = word_id; int last_word_id = word_id - 1; int emit_word_id_start = emit_offset_vec[real_word_id]; int emit_word_id_end = emit_offset_vec[real_word_id + 1]; int emit_word_length = emit_word_id_end - emit_word_id_start; const float* hidden_in; float* hidden_out = dout_data + emit_offset_vec[real_word_id] * hidden_size; if (word_id == 0) { hidden_in = h; } else { hidden_in = dout_data + emit_offset_vec[last_word_id] * hidden_size; } float* w_x_r = _temp_wx.mutable_data<float>(TARGET(kCUDA)) + r_offset * hidden_size + emit_word_id_start * hidden_size * 3; float* w_x_z = _temp_wx.mutable_data<float>(TARGET(kCUDA)) + z_offset * hidden_size + emit_word_id_start * hidden_size * 3; float* w_x_o = _temp_wx.mutable_data<float>(TARGET(kCUDA)) + o_offset * hidden_size + emit_word_id_start * hidden_size * 3; float* w_h_r = _temp_wh.mutable_data<float>(TARGET(kCUDA)) + r_offset * hidden_size; float* w_h_z = _temp_wh.mutable_data<float>(TARGET(kCUDA)) + z_offset * hidden_size; float* w_h_o = _temp_wh.mutable_data<float>(TARGET(kCUDA)) + o_offset * hidden_size; gemm_impl_->init( false, false, emit_word_length, 3 * hidden_size, hidden_size, &context); gemm_impl_->run(1.0f, 0.0f, hidden_in, _temp_weights_h2h.data<float>(), _temp_wh.mutable_data<float>(TARGET(kCUDA)), &context); const float* w_o = weights_h2h; const int block_dim = 512; const int grid_dim = (emit_word_length * hidden_size + block_dim - 1) / block_dim; cal_cudnn_kernel<<<grid_dim, block_dim, 0, stream>>>(w_x_r, w_x_z, w_x_o, w_h_r, w_h_z, w_h_o, hidden_size, emit_word_length, hidden_out, hidden_in); } if (is_batched) { _seq_util.sorted_seq_2_seq(_temp_tensor_out.data<float>(), dout->mutable_data<float>(TARGET(kCUDA)), hidden_size, stream); } dout->set_lod(x->lod()); } } // namespace cuda } // namespace kernels } // namespace lite } // namespace paddle REGISTER_LITE_KERNEL(search_grnn, kCUDA, kFloat, kNCHW, paddle::lite::kernels::cuda::SearchGrnnCompute, 
def) .BindInput("X", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindInput("Wi", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindInput("Wh", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindOutput("Out", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindOutput("tmp_buffer", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindOutput("idx_sorted_by_width", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .BindOutput("layout_input", {LiteType::GetTensorTy(TARGET(kCUDA), PRECISION(kFloat), DATALAYOUT(kNCHW))}) .Finalize();
af9f32ae61a6a117fdba2a74e5c3ce80cc331f63.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "voxelGrid.h" /*================================== Volumetric fusion CUDA ==================================*/ __global__ void fusion_CUDA(voxel *vox, vec *vertexMap, vec *trans, mat *rot, vec *offset, const int n) { //Compute the index of the voxel const int index_vol = n*RESOL_VOL*RESOL_VOL + blockIdx.x * 512 + threadIdx.x; const int i = index_vol / (RESOL_VOL*RESOL_VOL); const int j = (index_vol % (RESOL_VOL*RESOL_VOL)) / RESOL_VOL; const int k = (index_vol % (RESOL_VOL*RESOL_VOL)) % RESOL_VOL; //Add offset to the voxel int a = i + offset->z; int b = j + offset->y; int c = k + offset->x; const int index = a * RESOL_Y*RESOL_Z + b * RESOL_Z + c; //Varify the condition if (a < 0 || a > RESOL_X || b < 0 || b > RESOL_Y || c < 0 || c > RESOL_Z || (vox[index].isActive && vox[index].weight > 8)) return; vec tmp, point_vox, dist; //Voxel centers in view space tmp.x = vox[index].pos.x - trans->x; tmp.y = vox[index].pos.y - trans->y; tmp.z = vox[index].pos.z - trans->z; point_vox.x = rot->x1 * tmp.x + rot->y1 * tmp.y + rot->z1 * tmp.z; point_vox.y = rot->x2 * tmp.x + rot->y2 * tmp.y + rot->z2 * tmp.z; point_vox.z = rot->x3 * tmp.x + rot->y3 * tmp.y + rot->z3 * tmp.z; //Projected onto the img plane if (point_vox.z > LOWER_BOUND){ int x = (int)(point_vox.x * FOCAL_LEN / point_vox.z + IMG_WIDTH/2); int y = (int)(point_vox.y * -FOCAL_LEN / point_vox.z + IMG_HEIGHT/2); int pix = IMG_WIDTH * y + x; //The pixel is within the depth map if (x > 0 && x < IMG_WIDTH && y > 0 && y < IMG_HEIGHT){ //Point cloud in view space dist.x = vertexMap[pix].x - point_vox.x; dist.y = vertexMap[pix].y - point_vox.y; dist.z = vertexMap[pix].z - point_vox.z; //Compute SDF float norm = sqrtf(dist.x * dist.x + dist.y * dist.y + dist.z * dist.z); float tsdf = (dist.z > 0) ? 
norm / TRUNCATE : -norm / TRUNCATE; //Truncate SDF if (tsdf >= 1) vox[index].tsdf = (vox[index].weight * vox[index].tsdf + 1) / (vox[index].weight + 1); else if (tsdf > -1){ vox[index].tsdf = (vox[index].weight * vox[index].tsdf + tsdf) / (vox[index].weight + 1); vox[index].isActive = true; } else vox[index].tsdf = (vox[index].weight * vox[index].tsdf - 1) / (vox[index].weight + 1); ++vox[index].weight; } } } /*================================== Ray-casting for a depth map CUDA ==================================*/ __global__ void rayCast_CUDA(voxel *vox, vec *vertexMap, vec *vertexMap_fused, vec *bound1, vec *bound2, vec *center, vec *trans, mat *rot) { //Compute the index of the pixel const int x = (blockIdx.x * 512 + threadIdx.x) % IMG_WIDTH; const int y = (blockIdx.x * 512 + threadIdx.x) / IMG_WIDTH; const int pix = IMG_WIDTH * y + x; vec tmp, rayDir, rayPos, rayStep; float norm; float tsdf_prev, tsdf_cur = 1; bool isActive_prev, isActive_cur = false; //Compute ray direction & position tmp.x = (x - IMG_WIDTH/2) / FOCAL_LEN; tmp.y = (IMG_HEIGHT/2 - y) / FOCAL_LEN; tmp.z = 1; rayDir.x = rot->x1 * tmp.x + rot->x2 * tmp.y + rot->x3 * tmp.z; rayDir.y = rot->y1 * tmp.x + rot->y2 * tmp.y + rot->y3 * tmp.z; rayDir.z = rot->z1 * tmp.x + rot->z2 * tmp.y + rot->z3 * tmp.z; norm = sqrtf(rayDir.x * rayDir.x + rayDir.y * rayDir.y + rayDir.z * rayDir.z); //Stepping value per iteration rayStep.x = STEP * rayDir.x / norm; rayStep.y = STEP * rayDir.y / norm; rayStep.z = STEP * rayDir.z / norm; rayPos.x = center->x + rayStep.x; rayPos.y = center->y + rayStep.y; rayPos.z = center->z + rayStep.z; //Start casting //If rayPos is in the volume while(rayPos.x > VOX_LEN && rayPos.x < MAX_LEN_X && rayPos.y > VOX_LEN && rayPos.y < MAX_LEN_Y && rayPos.z > VOX_LEN && rayPos.z < MAX_LEN_Z){ int index = ((int)(rayPos.z / VOX_LEN)) * RESOL_Y*RESOL_Z + ((int)(rayPos.y / VOX_LEN)) * RESOL_Z + ((int)(rayPos.x / VOX_LEN)); tsdf_prev = tsdf_cur; tsdf_cur = vox[index].tsdf; isActive_prev = isActive_cur; isActive_cur = vox[index].isActive; //Detect a zero-crossing if (tsdf_cur * tsdf_prev < 0 && (isActive_cur || isActive_prev)){ //Interpolation & Transform to the view space tmp.x = vox[index].pos.x - vox[index].tsdf * rayDir.x - trans->x; tmp.y = vox[index].pos.y - vox[index].tsdf * rayDir.y - trans->y; tmp.z = vox[index].pos.z - vox[index].tsdf * rayDir.z - trans->z; vertexMap_fused[pix].x = rot->x1 * tmp.x + rot->y1 * tmp.y + rot->z1 * tmp.z; vertexMap_fused[pix].y = rot->x2 * tmp.x + rot->y2 * tmp.y + rot->z2 * tmp.z; vertexMap_fused[pix].z = rot->x3 * tmp.x + rot->y3 * tmp.y + rot->z3 * tmp.z; return; } rayPos.x += rayStep.x; rayPos.y += rayStep.y; rayPos.z += rayStep.z; } //If the ray didn't hit a point, use the raw data vertexMap_fused[pix].x = vertexMap[pix].x; vertexMap_fused[pix].y = vertexMap[pix].y; vertexMap_fused[pix].z = vertexMap[pix].z; } /*================================== Ray-casting (All: x direction) ==================================*/ __global__ void rayCastAll_X_CUDA(voxel *vox, vec *pc, unsigned int *pc_count) { //Compute the index of the voxel const int a = (blockIdx.x * 512 + threadIdx.x) / RESOL_Z; const int b = (blockIdx.x * 512 + threadIdx.x) % RESOL_Z; float tsdf_prev, tsdf_cur = 0; bool isActive_prev, isActive_cur = false; int index_tmp, index; index_tmp = a * RESOL_Z + b; //Start casting for (int c = 1; c < RESOL_X; ++c){ index = index_tmp + c * RESOL_Y*RESOL_Z; tsdf_prev = tsdf_cur; tsdf_cur = vox[index].tsdf; isActive_prev = isActive_cur; isActive_cur = vox[index].isActive; //Detect a 
zero-crossing if ((isActive_cur || isActive_prev) && tsdf_cur * tsdf_prev < 0 && *pc_count < MAX_PC_COUNT){ //Interpolation vec p = vox[index].pos; p.x -= vox[index].tsdf; pc[atomicAdd(pc_count, 1)] = p; } } } /*================================== Ray-casting (All: y direction) ==================================*/ __global__ void rayCastAll_Y_CUDA(voxel *vox, vec *pc, unsigned int *pc_count) { //Compute the index of the voxel const int a = (blockIdx.x * 512 + threadIdx.x) / RESOL_X; const int b = (blockIdx.x * 512 + threadIdx.x) % RESOL_X; float tsdf_prev, tsdf_cur = 0; bool isActive_prev, isActive_cur = false; int index_tmp, index; index_tmp = a * RESOL_Y*RESOL_Z + b; //Start casting for (int c = 1; c < RESOL_Y; ++c){ index = index_tmp + c * RESOL_Z; tsdf_prev = tsdf_cur; tsdf_cur = vox[index].tsdf; isActive_prev = isActive_cur; isActive_cur = vox[index].isActive; //Detect a zero-crossing if ((isActive_cur || isActive_prev) && tsdf_cur * tsdf_prev < 0 && *pc_count < MAX_PC_COUNT){ //Interpolation vec p = vox[index].pos; p.y -= vox[index].tsdf; pc[atomicAdd(pc_count, 1)] = p; } } } /*================================== Ray-casting (All: z direction) ==================================*/ __global__ void rayCastAll_Z_CUDA(voxel *vox, vec *pc, unsigned int *pc_count) { //Compute the index of the voxel const int a = (blockIdx.x * 512 + threadIdx.x) / RESOL_X; const int b = (blockIdx.x * 512 + threadIdx.x) % RESOL_X; float tsdf_prev, tsdf_cur = 0; bool isActive_prev, isActive_cur = false; int index_tmp, index; index_tmp = a * RESOL_Y*RESOL_Z + b * RESOL_Z; //Start casting for (int c = 1; c < RESOL_Z; ++c){ index = index_tmp + c; tsdf_prev = tsdf_cur; tsdf_cur = vox[index].tsdf; isActive_prev = isActive_cur; isActive_cur = vox[index].isActive; //Detect a zero-crossing if ((isActive_cur || isActive_prev) && tsdf_cur * tsdf_prev < 0 && *pc_count < MAX_PC_COUNT){ //Interpolation vec p = vox[index].pos; p.z -= vox[index].tsdf; pc[atomicAdd(pc_count, 1)] = p; } } } /*====================================== Constructor ======================================*/ voxelGrid::voxelGrid() { //Initialize the properties center.x = MAX_LEN_X / 2.0f; center.y = MAX_LEN_Y / 2.0f; center.z = MAX_LEN_Z / 2.0f; dir.x = 0; dir.y = 0; dir.z = 1; trans.x = 0; trans.y = 0; trans.z = 0; rot.x1 = 1; rot.x2 = 0; rot.x3 = 0; rot.y1 = 0; rot.y2 = 1; rot.y3 = 0; rot.z1 = 0; rot.z2 = 0; rot.z3 = 1; //Allocate memory for each voxel vox = new voxel[RESOL_X*RESOL_Y*RESOL_Z]; pc = new vec[MAX_PC_COUNT]; //Initialize each voxel for (int i = 0; i < RESOL_X; i++) for (int j = 0; j < RESOL_Y; j++) for (int k = 0; k < RESOL_Z; k++){ int index = i*RESOL_Y*RESOL_Z + j*RESOL_Z + k; vox[index].pos.x = k * VOX_LEN + VOX_LEN / 2.f; vox[index].pos.y = j * VOX_LEN + VOX_LEN / 2.f, vox[index].pos.z = i * VOX_LEN + VOX_LEN / 2.f; vox[index].tsdf = 1; vox[index].weight = 0; vox[index].isActive = false; } pc_count = 0; //Allocate the GPU memory hipMalloc(&d_vox, RESOL_X*RESOL_Y*RESOL_Z * sizeof(voxel)); hipMalloc(&d_pc, MAX_PC_COUNT * sizeof(vec)); hipMalloc(&d_vertexMap, IMG_WIDTH*IMG_HEIGHT * sizeof(vec)); hipMalloc(&d_vertexMap_fused, IMG_WIDTH*IMG_HEIGHT * sizeof(vec)); hipMalloc(&d_bound1, sizeof(vec)); hipMalloc(&d_bound2, sizeof(vec)); hipMalloc(&d_center, sizeof(vec)); hipMalloc(&d_trans, sizeof(vec)); hipMalloc(&d_rot, sizeof(mat)); hipMalloc(&d_offset, sizeof(vec)); hipMalloc(&d_pc_count, sizeof(unsigned int)); //Upload data to the GPU memory hipMemcpy(d_vox, vox, RESOL_X*RESOL_Y*RESOL_Z * sizeof(voxel), hipMemcpyHostToDevice); 
hipMemcpy(d_pc, pc, MAX_PC_COUNT * sizeof(vec), hipMemcpyHostToDevice); hipMemcpy(d_trans, &trans, sizeof(vec), hipMemcpyHostToDevice); hipMemcpy(d_rot, &rot, sizeof(mat), hipMemcpyHostToDevice); hipMemcpy(d_offset, &offset, sizeof(vec), hipMemcpyHostToDevice); hipMemcpy(d_pc_count, &pc_count, sizeof(unsigned int), hipMemcpyHostToDevice); } /*====================================== Destructor ======================================*/ voxelGrid::~voxelGrid() { //Free the GPU memory hipFree(d_vox); hipFree(d_pc); hipFree(d_vertexMap); hipFree(d_vertexMap_fused); hipFree(d_bound1); hipFree(d_bound2); hipFree(d_center); hipFree(d_trans); hipFree(d_rot); hipFree(d_offset); hipFree(d_pc_count); delete [] vox; delete [] pc; } /*====================================== TSDF Fusion ======================================*/ void voxelGrid::fusion(vec *vertexMap) { //Upload data to GPU hipMemcpy(d_vertexMap, vertexMap, IMG_WIDTH*IMG_HEIGHT * sizeof(vec), hipMemcpyHostToDevice); //Start fusion //RESOL_VOL = 128 //for (int j = 0; j < RESOL_VOL; j += 8) // fusion_CUDA << <512, 512 >> > (d_vox, d_vertexMap, d_trans, d_rot, d_offset, j); //RESOL_VOL = 192 for (int j = 0; j < RESOL_VOL; j += 8) fusion_CUDA << <576, 512 >> > (d_vox, d_vertexMap, d_trans, d_rot, d_offset, j); //RESOL_VOL = 256 //for (int j = 0; j < RESOL_VOL; j += 4) // fusion_CUDA << <512, 512 >> > (d_vox, d_vertexMap, d_trans, d_rot, d_offset, j); } /*====================================== Ray-casting ======================================*/ void voxelGrid::rayCast(vec *vertexMap_fused) { //Compute boundary center.x = trans.x; center.y = trans.y; center.z = trans.z; bound1.x = (center.x <= VOX_LEN) ? center.x : VOX_LEN; bound1.y = (center.y <= VOX_LEN) ? center.y : VOX_LEN; bound1.z = (center.z <= VOX_LEN) ? center.z : VOX_LEN; bound2.x = (center.x >= MAX_LEN_X) ? center.x : MAX_LEN_X; bound2.y = (center.y >= MAX_LEN_Y) ? center.y : MAX_LEN_Y; bound2.z = (center.z >= MAX_LEN_Z) ? 
center.z : MAX_LEN_Z; //Upload data to the GPU memory hipMemcpy(d_center, &center, sizeof(vec), hipMemcpyHostToDevice); hipMemcpy(d_bound1, &bound1, sizeof(vec), hipMemcpyHostToDevice); hipMemcpy(d_bound2, &bound2, sizeof(vec), hipMemcpyHostToDevice); //Start ray-casting rayCast_CUDA << <168, 512 >> > (d_vox, d_vertexMap, d_vertexMap_fused, d_bound1, d_bound2, d_center, d_trans, d_rot); //Download data from the GPU memory hipMemcpy(vertexMap_fused, d_vertexMap_fused, IMG_WIDTH*IMG_HEIGHT * sizeof(vec), hipMemcpyDeviceToHost); } /*====================================== Ray-casting (All: x, y, z direction) ======================================*/ void voxelGrid::rayCastAll(std::vector<vec> &pcData) { //Upload data to GPU pc_count = 0; hipMemcpy(d_pc_count, &pc_count, sizeof(unsigned int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( rayCastAll_X_CUDA), dim3(10), dim3(512), 0, 0, d_vox, d_pc, d_pc_count); hipLaunchKernelGGL(( rayCastAll_Y_CUDA), dim3(21), dim3(512), 0, 0, d_vox, d_pc, d_pc_count); hipLaunchKernelGGL(( rayCastAll_Z_CUDA), dim3(10), dim3(512), 0, 0, d_vox, d_pc, d_pc_count); //Download data from GPU hipMemcpy(&pc_count, d_pc_count, sizeof(unsigned int), hipMemcpyDeviceToHost); hipMemcpy(pc, d_pc, pc_count * sizeof(vec), hipMemcpyDeviceToHost); for(int i = 0; i < pc_count; ++i) pcData.push_back(pc[i]); } /*====================================== Ray-casting (All: only z direction) ======================================*/ void voxelGrid::rayCastAll_approx(std::vector<vec> &pcData) { //Upload data to GPU pc_count = 0; hipMemcpy(d_pc_count, &pc_count, sizeof(unsigned int), hipMemcpyHostToDevice); hipLaunchKernelGGL(( rayCastAll_X_CUDA), dim3(48), dim3(512), 0, 0, d_vox, d_pc, d_pc_count); //Download data from GPU hipMemcpy(&pc_count, d_pc_count, sizeof(unsigned int), hipMemcpyDeviceToHost); hipMemcpy(pc, d_pc, pc_count * sizeof(vec), hipMemcpyDeviceToHost); //pcData.insert(pcData.being(), pc, pc+pc_count); for(int i = 0; i < pc_count; ++i) pcData.push_back(pc[i]); } /*====================================== Set translation vector ======================================*/ void voxelGrid::setTrans(float x, float y, float z) { trans.x = x; trans.y = y; trans.z = z; hipMemcpy(d_trans, &trans, sizeof(vec), hipMemcpyHostToDevice); } void voxelGrid::setTrans(vec t) { trans = t; hipMemcpy(d_trans, &trans, sizeof(vec), hipMemcpyHostToDevice); } /*====================================== Set rotation matrix ======================================*/ void voxelGrid::setRot(float x1, float x2, float x3, float y1, float y2, float y3, float z1, float z2, float z3) { rot.x1 = x1; rot.x2 = x2; rot.x3 = x3; rot.y1 = y1; rot.y2 = y2; rot.y3 = y3; rot.z1 = z1; rot.z2 = z2; rot.z3 = z3; hipMemcpy(d_rot, &rot, sizeof(mat), hipMemcpyHostToDevice); } void voxelGrid::setRot(mat r) { rot = r; hipMemcpy(d_rot, &rot, sizeof(mat), hipMemcpyHostToDevice); } /*====================================== Compute offset ======================================*/ void voxelGrid::computeOffset() { vec dir_cur; //Rotate the orientation of the camera dir_cur.x = rot.x1 * dir.x + rot.x2 * dir.y + rot.x3 * dir.z; dir_cur.y = rot.y1 * dir.x + rot.y2 * dir.y + rot.y3 * dir.z; dir_cur.z = rot.z1 * dir.x + rot.z2 * dir.y + rot.z3 * dir.z; //Compute the offset of the voxel index offset.x = int((trans.x + SHIFT * dir_cur.x - MAX_LEN_VOL/2) / VOX_LEN); offset.y = int((trans.y + SHIFT * dir_cur.y - MAX_LEN_VOL/2) / VOX_LEN); offset.z = int((trans.z + SHIFT * dir_cur.z - MAX_LEN_VOL/2) / VOX_LEN); //Upload data to the GPU memory 
hipMemcpy(d_offset, &offset, sizeof(vec), hipMemcpyHostToDevice); } /*====================================== Get volume offset ======================================*/ vec voxelGrid::getOffsetTrans() { vec offsetTrans; offsetTrans.x = offset.x * VOX_LEN; offsetTrans.y = offset.y * VOX_LEN; offsetTrans.z = offset.z * VOX_LEN; return offsetTrans; } /*==================================== Compute TSDF from the point cloud ====================================*/ void voxelGrid::inverseTSDF(std::vector<vec> &pc) { vec vertexMap[IMG_WIDTH*IMG_HEIGHT]; for(int n = 0; n < 2; ++n) for(int k = 0; k < 10; ++k) for(int j = 0; j < 20; ++j){ for(int i = 0; i < IMG_WIDTH*IMG_HEIGHT; ++i){ vertexMap[i].x = 0; vertexMap[i].y = 0; vertexMap[i].z = 0; } setTrans(256*j, 256*k, 1024*n); setRot(1, 0, 0, 0, 1, 0, 0, 0, 1); for(int i = 0; i < pc.size(); ++i){ vec tmp, point, point_viewSpace; point = pc[i]; //Varify the condition if(point.x < 0 || point.x > MAX_LEN_X || point.y < 0 || point.y > MAX_LEN_Y || point.z < 0 || point.z > MAX_LEN_Z) continue; //Point in view space tmp.x = point.x - trans.x; tmp.y = point.y - trans.y; tmp.z = point.z - trans.z; point_viewSpace.x = rot.x1 * tmp.x + rot.y1 * tmp.y + rot.z1 * tmp.z; point_viewSpace.y = rot.x2 * tmp.x + rot.y2 * tmp.y + rot.z2 * tmp.z; point_viewSpace.z = rot.x3 * tmp.x + rot.y3 * tmp.y + rot.z3 * tmp.z; //Projected onto the img plane if(point_viewSpace.z > 0){ int x = (int)(point_viewSpace.x * FOCAL_LEN / point_viewSpace.z + IMG_WIDTH/2); int y = (int)(point_viewSpace.y * -FOCAL_LEN / point_viewSpace.z + IMG_HEIGHT/2); int pix = IMG_WIDTH * y + x; //The pixel is within the depth map if (x > 0 && x < IMG_WIDTH && y > 0 && y < IMG_HEIGHT){ vertexMap[pix] = point_viewSpace; } } } computeOffset(); fusion(vertexMap); } } voxel* voxelGrid::getVox() { //Download voxel data hipMemcpy(vox, d_vox, RESOL_X*RESOL_Y*RESOL_Z * sizeof(voxel), hipMemcpyDeviceToHost); return vox; }
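Side note on the volumetric code above: fusion_CUDA keeps, per voxel, a truncated signed distance and a running weight. A minimal host-side mirror of that update rule, with hypothetical struct and function names, for illustration only:

// Hypothetical scalar mirror of the per-voxel update performed in fusion_CUDA above.
struct VoxelRef { float tsdf; int weight; bool isActive; };

inline void fuse_tsdf(VoxelRef& v, float tsdf_new) {  // tsdf_new = signed distance / TRUNCATE
  if (tsdf_new >= 1.0f) {
    v.tsdf = (v.weight * v.tsdf + 1.0f) / (v.weight + 1);   // clamp far-side contribution to +1
  } else if (tsdf_new > -1.0f) {
    v.tsdf = (v.weight * v.tsdf + tsdf_new) / (v.weight + 1);
    v.isActive = true;                                       // surface lies within the truncation band
  } else {
    v.tsdf = (v.weight * v.tsdf - 1.0f) / (v.weight + 1);    // clamp near-side contribution to -1
  }
  ++v.weight;                                                // running average over observations
}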
af9f32ae61a6a117fdba2a74e5c3ce80cc331f63.cu
#include "voxelGrid.h" /*================================== Volumetric fusion CUDA ==================================*/ __global__ void fusion_CUDA(voxel *vox, vec *vertexMap, vec *trans, mat *rot, vec *offset, const int n) { //Compute the index of the voxel const int index_vol = n*RESOL_VOL*RESOL_VOL + blockIdx.x * 512 + threadIdx.x; const int i = index_vol / (RESOL_VOL*RESOL_VOL); const int j = (index_vol % (RESOL_VOL*RESOL_VOL)) / RESOL_VOL; const int k = (index_vol % (RESOL_VOL*RESOL_VOL)) % RESOL_VOL; //Add offset to the voxel int a = i + offset->z; int b = j + offset->y; int c = k + offset->x; const int index = a * RESOL_Y*RESOL_Z + b * RESOL_Z + c; //Varify the condition if (a < 0 || a > RESOL_X || b < 0 || b > RESOL_Y || c < 0 || c > RESOL_Z || (vox[index].isActive && vox[index].weight > 8)) return; vec tmp, point_vox, dist; //Voxel centers in view space tmp.x = vox[index].pos.x - trans->x; tmp.y = vox[index].pos.y - trans->y; tmp.z = vox[index].pos.z - trans->z; point_vox.x = rot->x1 * tmp.x + rot->y1 * tmp.y + rot->z1 * tmp.z; point_vox.y = rot->x2 * tmp.x + rot->y2 * tmp.y + rot->z2 * tmp.z; point_vox.z = rot->x3 * tmp.x + rot->y3 * tmp.y + rot->z3 * tmp.z; //Projected onto the img plane if (point_vox.z > LOWER_BOUND){ int x = (int)(point_vox.x * FOCAL_LEN / point_vox.z + IMG_WIDTH/2); int y = (int)(point_vox.y * -FOCAL_LEN / point_vox.z + IMG_HEIGHT/2); int pix = IMG_WIDTH * y + x; //The pixel is within the depth map if (x > 0 && x < IMG_WIDTH && y > 0 && y < IMG_HEIGHT){ //Point cloud in view space dist.x = vertexMap[pix].x - point_vox.x; dist.y = vertexMap[pix].y - point_vox.y; dist.z = vertexMap[pix].z - point_vox.z; //Compute SDF float norm = sqrtf(dist.x * dist.x + dist.y * dist.y + dist.z * dist.z); float tsdf = (dist.z > 0) ? norm / TRUNCATE : -norm / TRUNCATE; //Truncate SDF if (tsdf >= 1) vox[index].tsdf = (vox[index].weight * vox[index].tsdf + 1) / (vox[index].weight + 1); else if (tsdf > -1){ vox[index].tsdf = (vox[index].weight * vox[index].tsdf + tsdf) / (vox[index].weight + 1); vox[index].isActive = true; } else vox[index].tsdf = (vox[index].weight * vox[index].tsdf - 1) / (vox[index].weight + 1); ++vox[index].weight; } } } /*================================== Ray-casting for a depth map CUDA ==================================*/ __global__ void rayCast_CUDA(voxel *vox, vec *vertexMap, vec *vertexMap_fused, vec *bound1, vec *bound2, vec *center, vec *trans, mat *rot) { //Compute the index of the pixel const int x = (blockIdx.x * 512 + threadIdx.x) % IMG_WIDTH; const int y = (blockIdx.x * 512 + threadIdx.x) / IMG_WIDTH; const int pix = IMG_WIDTH * y + x; vec tmp, rayDir, rayPos, rayStep; float norm; float tsdf_prev, tsdf_cur = 1; bool isActive_prev, isActive_cur = false; //Compute ray direction & position tmp.x = (x - IMG_WIDTH/2) / FOCAL_LEN; tmp.y = (IMG_HEIGHT/2 - y) / FOCAL_LEN; tmp.z = 1; rayDir.x = rot->x1 * tmp.x + rot->x2 * tmp.y + rot->x3 * tmp.z; rayDir.y = rot->y1 * tmp.x + rot->y2 * tmp.y + rot->y3 * tmp.z; rayDir.z = rot->z1 * tmp.x + rot->z2 * tmp.y + rot->z3 * tmp.z; norm = sqrtf(rayDir.x * rayDir.x + rayDir.y * rayDir.y + rayDir.z * rayDir.z); //Stepping value per iteration rayStep.x = STEP * rayDir.x / norm; rayStep.y = STEP * rayDir.y / norm; rayStep.z = STEP * rayDir.z / norm; rayPos.x = center->x + rayStep.x; rayPos.y = center->y + rayStep.y; rayPos.z = center->z + rayStep.z; //Start casting //If rayPos is in the volume while(rayPos.x > VOX_LEN && rayPos.x < MAX_LEN_X && rayPos.y > VOX_LEN && rayPos.y < MAX_LEN_Y && rayPos.z > VOX_LEN && rayPos.z 
< MAX_LEN_Z){ int index = ((int)(rayPos.z / VOX_LEN)) * RESOL_Y*RESOL_Z + ((int)(rayPos.y / VOX_LEN)) * RESOL_Z + ((int)(rayPos.x / VOX_LEN)); tsdf_prev = tsdf_cur; tsdf_cur = vox[index].tsdf; isActive_prev = isActive_cur; isActive_cur = vox[index].isActive; //Detect a zero-crossing if (tsdf_cur * tsdf_prev < 0 && (isActive_cur || isActive_prev)){ //Interpolation & Transform to the view space tmp.x = vox[index].pos.x - vox[index].tsdf * rayDir.x - trans->x; tmp.y = vox[index].pos.y - vox[index].tsdf * rayDir.y - trans->y; tmp.z = vox[index].pos.z - vox[index].tsdf * rayDir.z - trans->z; vertexMap_fused[pix].x = rot->x1 * tmp.x + rot->y1 * tmp.y + rot->z1 * tmp.z; vertexMap_fused[pix].y = rot->x2 * tmp.x + rot->y2 * tmp.y + rot->z2 * tmp.z; vertexMap_fused[pix].z = rot->x3 * tmp.x + rot->y3 * tmp.y + rot->z3 * tmp.z; return; } rayPos.x += rayStep.x; rayPos.y += rayStep.y; rayPos.z += rayStep.z; } //If the ray didn't hit a point, use the raw data vertexMap_fused[pix].x = vertexMap[pix].x; vertexMap_fused[pix].y = vertexMap[pix].y; vertexMap_fused[pix].z = vertexMap[pix].z; } /*================================== Ray-casting (All: x direction) ==================================*/ __global__ void rayCastAll_X_CUDA(voxel *vox, vec *pc, unsigned int *pc_count) { //Compute the index of the voxel const int a = (blockIdx.x * 512 + threadIdx.x) / RESOL_Z; const int b = (blockIdx.x * 512 + threadIdx.x) % RESOL_Z; float tsdf_prev, tsdf_cur = 0; bool isActive_prev, isActive_cur = false; int index_tmp, index; index_tmp = a * RESOL_Z + b; //Start casting for (int c = 1; c < RESOL_X; ++c){ index = index_tmp + c * RESOL_Y*RESOL_Z; tsdf_prev = tsdf_cur; tsdf_cur = vox[index].tsdf; isActive_prev = isActive_cur; isActive_cur = vox[index].isActive; //Detect a zero-crossing if ((isActive_cur || isActive_prev) && tsdf_cur * tsdf_prev < 0 && *pc_count < MAX_PC_COUNT){ //Interpolation vec p = vox[index].pos; p.x -= vox[index].tsdf; pc[atomicAdd(pc_count, 1)] = p; } } } /*================================== Ray-casting (All: y direction) ==================================*/ __global__ void rayCastAll_Y_CUDA(voxel *vox, vec *pc, unsigned int *pc_count) { //Compute the index of the voxel const int a = (blockIdx.x * 512 + threadIdx.x) / RESOL_X; const int b = (blockIdx.x * 512 + threadIdx.x) % RESOL_X; float tsdf_prev, tsdf_cur = 0; bool isActive_prev, isActive_cur = false; int index_tmp, index; index_tmp = a * RESOL_Y*RESOL_Z + b; //Start casting for (int c = 1; c < RESOL_Y; ++c){ index = index_tmp + c * RESOL_Z; tsdf_prev = tsdf_cur; tsdf_cur = vox[index].tsdf; isActive_prev = isActive_cur; isActive_cur = vox[index].isActive; //Detect a zero-crossing if ((isActive_cur || isActive_prev) && tsdf_cur * tsdf_prev < 0 && *pc_count < MAX_PC_COUNT){ //Interpolation vec p = vox[index].pos; p.y -= vox[index].tsdf; pc[atomicAdd(pc_count, 1)] = p; } } } /*================================== Ray-casting (All: z direction) ==================================*/ __global__ void rayCastAll_Z_CUDA(voxel *vox, vec *pc, unsigned int *pc_count) { //Compute the index of the voxel const int a = (blockIdx.x * 512 + threadIdx.x) / RESOL_X; const int b = (blockIdx.x * 512 + threadIdx.x) % RESOL_X; float tsdf_prev, tsdf_cur = 0; bool isActive_prev, isActive_cur = false; int index_tmp, index; index_tmp = a * RESOL_Y*RESOL_Z + b * RESOL_Z; //Start casting for (int c = 1; c < RESOL_Z; ++c){ index = index_tmp + c; tsdf_prev = tsdf_cur; tsdf_cur = vox[index].tsdf; isActive_prev = isActive_cur; isActive_cur = vox[index].isActive; //Detect a 
zero-crossing if ((isActive_cur || isActive_prev) && tsdf_cur * tsdf_prev < 0 && *pc_count < MAX_PC_COUNT){ //Interpolation vec p = vox[index].pos; p.z -= vox[index].tsdf; pc[atomicAdd(pc_count, 1)] = p; } } } /*====================================== Constructor ======================================*/ voxelGrid::voxelGrid() { //Initialize the properties center.x = MAX_LEN_X / 2.0f; center.y = MAX_LEN_Y / 2.0f; center.z = MAX_LEN_Z / 2.0f; dir.x = 0; dir.y = 0; dir.z = 1; trans.x = 0; trans.y = 0; trans.z = 0; rot.x1 = 1; rot.x2 = 0; rot.x3 = 0; rot.y1 = 0; rot.y2 = 1; rot.y3 = 0; rot.z1 = 0; rot.z2 = 0; rot.z3 = 1; //Allocate memory for each voxel vox = new voxel[RESOL_X*RESOL_Y*RESOL_Z]; pc = new vec[MAX_PC_COUNT]; //Initialize each voxel for (int i = 0; i < RESOL_X; i++) for (int j = 0; j < RESOL_Y; j++) for (int k = 0; k < RESOL_Z; k++){ int index = i*RESOL_Y*RESOL_Z + j*RESOL_Z + k; vox[index].pos.x = k * VOX_LEN + VOX_LEN / 2.f; vox[index].pos.y = j * VOX_LEN + VOX_LEN / 2.f, vox[index].pos.z = i * VOX_LEN + VOX_LEN / 2.f; vox[index].tsdf = 1; vox[index].weight = 0; vox[index].isActive = false; } pc_count = 0; //Allocate the GPU memory cudaMalloc(&d_vox, RESOL_X*RESOL_Y*RESOL_Z * sizeof(voxel)); cudaMalloc(&d_pc, MAX_PC_COUNT * sizeof(vec)); cudaMalloc(&d_vertexMap, IMG_WIDTH*IMG_HEIGHT * sizeof(vec)); cudaMalloc(&d_vertexMap_fused, IMG_WIDTH*IMG_HEIGHT * sizeof(vec)); cudaMalloc(&d_bound1, sizeof(vec)); cudaMalloc(&d_bound2, sizeof(vec)); cudaMalloc(&d_center, sizeof(vec)); cudaMalloc(&d_trans, sizeof(vec)); cudaMalloc(&d_rot, sizeof(mat)); cudaMalloc(&d_offset, sizeof(vec)); cudaMalloc(&d_pc_count, sizeof(unsigned int)); //Upload data to the GPU memory cudaMemcpy(d_vox, vox, RESOL_X*RESOL_Y*RESOL_Z * sizeof(voxel), cudaMemcpyHostToDevice); cudaMemcpy(d_pc, pc, MAX_PC_COUNT * sizeof(vec), cudaMemcpyHostToDevice); cudaMemcpy(d_trans, &trans, sizeof(vec), cudaMemcpyHostToDevice); cudaMemcpy(d_rot, &rot, sizeof(mat), cudaMemcpyHostToDevice); cudaMemcpy(d_offset, &offset, sizeof(vec), cudaMemcpyHostToDevice); cudaMemcpy(d_pc_count, &pc_count, sizeof(unsigned int), cudaMemcpyHostToDevice); } /*====================================== Destructor ======================================*/ voxelGrid::~voxelGrid() { //Free the GPU memory cudaFree(d_vox); cudaFree(d_pc); cudaFree(d_vertexMap); cudaFree(d_vertexMap_fused); cudaFree(d_bound1); cudaFree(d_bound2); cudaFree(d_center); cudaFree(d_trans); cudaFree(d_rot); cudaFree(d_offset); cudaFree(d_pc_count); delete [] vox; delete [] pc; } /*====================================== TSDF Fusion ======================================*/ void voxelGrid::fusion(vec *vertexMap) { //Upload data to GPU cudaMemcpy(d_vertexMap, vertexMap, IMG_WIDTH*IMG_HEIGHT * sizeof(vec), cudaMemcpyHostToDevice); //Start fusion //RESOL_VOL = 128 //for (int j = 0; j < RESOL_VOL; j += 8) // fusion_CUDA << <512, 512 >> > (d_vox, d_vertexMap, d_trans, d_rot, d_offset, j); //RESOL_VOL = 192 for (int j = 0; j < RESOL_VOL; j += 8) fusion_CUDA << <576, 512 >> > (d_vox, d_vertexMap, d_trans, d_rot, d_offset, j); //RESOL_VOL = 256 //for (int j = 0; j < RESOL_VOL; j += 4) // fusion_CUDA << <512, 512 >> > (d_vox, d_vertexMap, d_trans, d_rot, d_offset, j); } /*====================================== Ray-casting ======================================*/ void voxelGrid::rayCast(vec *vertexMap_fused) { //Compute boundary center.x = trans.x; center.y = trans.y; center.z = trans.z; bound1.x = (center.x <= VOX_LEN) ? center.x : VOX_LEN; bound1.y = (center.y <= VOX_LEN) ? 
center.y : VOX_LEN; bound1.z = (center.z <= VOX_LEN) ? center.z : VOX_LEN; bound2.x = (center.x >= MAX_LEN_X) ? center.x : MAX_LEN_X; bound2.y = (center.y >= MAX_LEN_Y) ? center.y : MAX_LEN_Y; bound2.z = (center.z >= MAX_LEN_Z) ? center.z : MAX_LEN_Z; //Upload data to the GPU memory cudaMemcpy(d_center, &center, sizeof(vec), cudaMemcpyHostToDevice); cudaMemcpy(d_bound1, &bound1, sizeof(vec), cudaMemcpyHostToDevice); cudaMemcpy(d_bound2, &bound2, sizeof(vec), cudaMemcpyHostToDevice); //Start ray-casting rayCast_CUDA << <168, 512 >> > (d_vox, d_vertexMap, d_vertexMap_fused, d_bound1, d_bound2, d_center, d_trans, d_rot); //Download data from the GPU memory cudaMemcpy(vertexMap_fused, d_vertexMap_fused, IMG_WIDTH*IMG_HEIGHT * sizeof(vec), cudaMemcpyDeviceToHost); } /*====================================== Ray-casting (All: x, y, z direction) ======================================*/ void voxelGrid::rayCastAll(std::vector<vec> &pcData) { //Upload data to GPU pc_count = 0; cudaMemcpy(d_pc_count, &pc_count, sizeof(unsigned int), cudaMemcpyHostToDevice); rayCastAll_X_CUDA<<<10, 512>>>(d_vox, d_pc, d_pc_count); rayCastAll_Y_CUDA<<<21, 512>>>(d_vox, d_pc, d_pc_count); rayCastAll_Z_CUDA<<<10, 512>>>(d_vox, d_pc, d_pc_count); //Download data from GPU cudaMemcpy(&pc_count, d_pc_count, sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaMemcpy(pc, d_pc, pc_count * sizeof(vec), cudaMemcpyDeviceToHost); for(int i = 0; i < pc_count; ++i) pcData.push_back(pc[i]); } /*====================================== Ray-casting (All: only z direction) ======================================*/ void voxelGrid::rayCastAll_approx(std::vector<vec> &pcData) { //Upload data to GPU pc_count = 0; cudaMemcpy(d_pc_count, &pc_count, sizeof(unsigned int), cudaMemcpyHostToDevice); rayCastAll_X_CUDA<<<48, 512>>>(d_vox, d_pc, d_pc_count); //Download data from GPU cudaMemcpy(&pc_count, d_pc_count, sizeof(unsigned int), cudaMemcpyDeviceToHost); cudaMemcpy(pc, d_pc, pc_count * sizeof(vec), cudaMemcpyDeviceToHost); //pcData.insert(pcData.being(), pc, pc+pc_count); for(int i = 0; i < pc_count; ++i) pcData.push_back(pc[i]); } /*====================================== Set translation vector ======================================*/ void voxelGrid::setTrans(float x, float y, float z) { trans.x = x; trans.y = y; trans.z = z; cudaMemcpy(d_trans, &trans, sizeof(vec), cudaMemcpyHostToDevice); } void voxelGrid::setTrans(vec t) { trans = t; cudaMemcpy(d_trans, &trans, sizeof(vec), cudaMemcpyHostToDevice); } /*====================================== Set rotation matrix ======================================*/ void voxelGrid::setRot(float x1, float x2, float x3, float y1, float y2, float y3, float z1, float z2, float z3) { rot.x1 = x1; rot.x2 = x2; rot.x3 = x3; rot.y1 = y1; rot.y2 = y2; rot.y3 = y3; rot.z1 = z1; rot.z2 = z2; rot.z3 = z3; cudaMemcpy(d_rot, &rot, sizeof(mat), cudaMemcpyHostToDevice); } void voxelGrid::setRot(mat r) { rot = r; cudaMemcpy(d_rot, &rot, sizeof(mat), cudaMemcpyHostToDevice); } /*====================================== Compute offset ======================================*/ void voxelGrid::computeOffset() { vec dir_cur; //Rotate the orientation of the camera dir_cur.x = rot.x1 * dir.x + rot.x2 * dir.y + rot.x3 * dir.z; dir_cur.y = rot.y1 * dir.x + rot.y2 * dir.y + rot.y3 * dir.z; dir_cur.z = rot.z1 * dir.x + rot.z2 * dir.y + rot.z3 * dir.z; //Compute the offset of the voxel index offset.x = int((trans.x + SHIFT * dir_cur.x - MAX_LEN_VOL/2) / VOX_LEN); offset.y = int((trans.y + SHIFT * dir_cur.y - MAX_LEN_VOL/2) / VOX_LEN); 
offset.z = int((trans.z + SHIFT * dir_cur.z - MAX_LEN_VOL/2) / VOX_LEN); //Upload data to the GPU memory cudaMemcpy(d_offset, &offset, sizeof(vec), cudaMemcpyHostToDevice); } /*====================================== Get volume offset ======================================*/ vec voxelGrid::getOffsetTrans() { vec offsetTrans; offsetTrans.x = offset.x * VOX_LEN; offsetTrans.y = offset.y * VOX_LEN; offsetTrans.z = offset.z * VOX_LEN; return offsetTrans; } /*==================================== Compute TSDF from the point cloud ====================================*/ void voxelGrid::inverseTSDF(std::vector<vec> &pc) { vec vertexMap[IMG_WIDTH*IMG_HEIGHT]; for(int n = 0; n < 2; ++n) for(int k = 0; k < 10; ++k) for(int j = 0; j < 20; ++j){ for(int i = 0; i < IMG_WIDTH*IMG_HEIGHT; ++i){ vertexMap[i].x = 0; vertexMap[i].y = 0; vertexMap[i].z = 0; } setTrans(256*j, 256*k, 1024*n); setRot(1, 0, 0, 0, 1, 0, 0, 0, 1); for(int i = 0; i < pc.size(); ++i){ vec tmp, point, point_viewSpace; point = pc[i]; //Varify the condition if(point.x < 0 || point.x > MAX_LEN_X || point.y < 0 || point.y > MAX_LEN_Y || point.z < 0 || point.z > MAX_LEN_Z) continue; //Point in view space tmp.x = point.x - trans.x; tmp.y = point.y - trans.y; tmp.z = point.z - trans.z; point_viewSpace.x = rot.x1 * tmp.x + rot.y1 * tmp.y + rot.z1 * tmp.z; point_viewSpace.y = rot.x2 * tmp.x + rot.y2 * tmp.y + rot.z2 * tmp.z; point_viewSpace.z = rot.x3 * tmp.x + rot.y3 * tmp.y + rot.z3 * tmp.z; //Projected onto the img plane if(point_viewSpace.z > 0){ int x = (int)(point_viewSpace.x * FOCAL_LEN / point_viewSpace.z + IMG_WIDTH/2); int y = (int)(point_viewSpace.y * -FOCAL_LEN / point_viewSpace.z + IMG_HEIGHT/2); int pix = IMG_WIDTH * y + x; //The pixel is within the depth map if (x > 0 && x < IMG_WIDTH && y > 0 && y < IMG_HEIGHT){ vertexMap[pix] = point_viewSpace; } } } computeOffset(); fusion(vertexMap); } } voxel* voxelGrid::getVox() { //Download voxel data cudaMemcpy(vox, d_vox, RESOL_X*RESOL_Y*RESOL_Z * sizeof(voxel), cudaMemcpyDeviceToHost); return vox; }
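Across this pair the device code is unchanged; hipify only renames runtime calls with matching signatures (cudaMalloc→hipMalloc, cudaMemcpy→hipMemcpy, cudaMemcpyHostToDevice→hipMemcpyHostToDevice, cudaFree→hipFree). Neither version checks return codes, so a minimal, hypothetical HIP sketch with error checking is shown below; HIP_CHECK and the buffer names are assumptions, not taken from the files:

// Minimal standalone HIP host sketch (illustration only).
#include <hip/hip_runtime.h>
#include <cstdio>
#include <cstdlib>

#define HIP_CHECK(call)                                                   \
  do {                                                                    \
    hipError_t err_ = (call);                                             \
    if (err_ != hipSuccess) {                                             \
      std::fprintf(stderr, "HIP error %s at %s:%d\n",                     \
                   hipGetErrorString(err_), __FILE__, __LINE__);          \
      std::exit(EXIT_FAILURE);                                            \
    }                                                                     \
  } while (0)

int main() {
  const size_t n = 1024;
  float host[1024] = {0};
  float* dev = nullptr;
  HIP_CHECK(hipMalloc(reinterpret_cast<void**>(&dev), n * sizeof(float)));   // was cudaMalloc
  HIP_CHECK(hipMemcpy(dev, host, n * sizeof(float), hipMemcpyHostToDevice)); // was cudaMemcpy
  HIP_CHECK(hipFree(dev));                                                   // was cudaFree
  return 0;
}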
5a6ae78f613c433bda76d8ac63a6a5d475e75a9f.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <time.h> //#include "cv.h" //#include "highgui.h" #include "elas.h" #include <vector> #include "triangle.h" #include "matrix.h" #include <stdlib.h> using namespace std; /*** * 1. hipHostMalloc and hipHostFree wrap * 2. create Descriptor of two img * 3. compute support point * 4. convert dis to cloud * 5. cuda_computeD * 6. leftRightConsistencyCheck * */ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__);} inline void gpuAssert(hipError_t code, const char *file, int line, bool abort = true) { if(code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if(abort) exit(code); } } #define WIDTH 320 #define HEIGH 240 #define GRID_SIZE 20 enum setting { ROBOTICS, MIDDLEBURY }; // parameter set Elas::parameters param; struct plane { float t1a, t1b, t1c; float t2a; }; __global__ void leftRightConsistencyCheck(float* D1_g, float* D2_g); /** * 1. hipHostMalloc and hipHostFree wrap * * */ static int count_alloc = 0; static int count_free = 0; void cudaFreeHost_cpuaa(void *p) { count_free++; hipHostFree(p); hipError_t err = hipGetLastError(); if(0 != err) printf("cuda error: %s\n", hipGetErrorString(err)); } void SetDeviceMap() { hipSetDeviceFlags(hipDeviceMapHost); } void* HostMal(void **p, long size) { count_alloc++; void *p_g; // hipHostMalloc((void**)p,size, hipHostMallocDefault | hipHostMallocMapped); hipHostMalloc((void**)p, size, hipHostMallocDefault ); hipError_t err = hipGetLastError(); if(0 != err) printf("cuda error: %s\n", hipGetErrorString(err)); // hipHostGetDevicePointer(&p_g, *p, 0); err = hipGetLastError(); if(0 != err) printf("cuda error: %s\n", hipGetErrorString(err)); return p_g; } void allocFreeCount() { printf("count_alloc = %d\n", count_alloc); printf("count_free= %d\n", count_free); } /** * 1. over: hipHostMalloc and hipHostFree wrap * */ //dim3 threads(320 - 6 , 1); //dim3 grid( 1, 240 -6 ); => (0, 233) /** * 2. 
create Descriptor of two img * */ __global__ \ void createDesc_gpu_kernel(uint8_t* I_desc, uint8_t* I_du, uint8_t* I_dv) { int u = blockDim.x * blockIdx.x + threadIdx.x; int v = blockDim.y * blockIdx.y + threadIdx.y; int x = u + 3; int y = v + 3; __shared__ uint8_t I_du_share[320 * 5]; __shared__ uint8_t I_dv_share[320 * 3]; uint8_t *I_desc_curr; for(int i = 0; i < 5; i++){ *(I_du_share + x + i * 320) = *(I_du + x + (y-2 + i) * 320); } for(int i = 0; i < 3; i++){ *(I_dv_share + x + i * 320) = *(I_dv + x + (y-1 + i) * 320); } __syncthreads(); I_desc_curr = I_desc + (y* WIDTH + x) * 16; *(I_desc_curr++) = *(I_du_share + (0 * WIDTH + x + 0) ); *(I_desc_curr++) = *(I_du_share + (1 * WIDTH + x - 2) ); *(I_desc_curr++) = *(I_du_share + (1 * WIDTH + x + 0) ); *(I_desc_curr++) = *(I_du_share + (1 * WIDTH + x + 2) ); *(I_desc_curr++) = *(I_du_share + (2 * WIDTH + x - 1) ); *(I_desc_curr++) = *(I_du_share + (2 * WIDTH + x + 0) ); *(I_desc_curr++) = *(I_du_share + (2 * WIDTH + x + 0) ); *(I_desc_curr++) = *(I_du_share + (2 * WIDTH + x + 2) ); *(I_desc_curr++) = *(I_du_share + (3 * WIDTH + x - 2) ); *(I_desc_curr++) = *(I_du_share + (3 * WIDTH + x + 0) ); *(I_desc_curr++) = *(I_du_share + (3 * WIDTH + x + 2) ); *(I_desc_curr++) = *(I_du_share + (4 * WIDTH + x + 0) ); *(I_desc_curr++) = *(I_dv_share + (0 * WIDTH + x + 0) ); *(I_desc_curr++) = *(I_dv_share + (1 * WIDTH + x + 1) ); *(I_desc_curr++) = *(I_dv_share + (1 * WIDTH + x - 1) ); *(I_desc_curr++) = *(I_dv_share + (2 * WIDTH + x + 0) ); } int __createDesc_gpu(uint8_t* I_desc, uint8_t* I_du_g, uint8_t* I_dv_g ) { dim3 threads(WIDTH - 6 , 1); dim3 grid( 1, HEIGH - 6 ); // hipDeviceSynchronize(); hipLaunchKernelGGL(( createDesc_gpu_kernel), dim3(grid), dim3(threads), 0 , 0, I_desc, I_du_g, I_dv_g ); gpuErrchk(hipDeviceSynchronize()); // hipError_t err = hipGetLastError(); // printf("cuda error: %s\n", hipGetErrorString(err)); // hipDeviceSynchronize(); //2.88 - 0.19 } /** * 2. over: create Descriptor of two img * */ /** * 3. 
compute support point * */ __device__ \ uint32_t getAddressOffsetImage1(const int32_t& u, const int32_t& v, const int32_t& width) { return v*width + u; } __device__ \ unsigned int computeMatchEnergy1(unsigned char* dst1, unsigned char* dst2, int offset) { unsigned int a, b, c, e, r0, r4; a = abs(*(dst1 + offset) - *(dst2 + offset)) + abs(*(dst1 + offset + 1) - *(dst2 + offset + 1)); b = abs(*(dst1 + offset + 2) - *(dst2 + offset + 2)) + abs(*(dst1 + offset + 3) - *(dst2 + offset + 3)); c = abs(*(dst1 + offset + 4) - *(dst2 + offset + 4)) + abs(*(dst1 + offset + 5) - *(dst2 + offset + 5)); e = abs(*(dst1 + offset + 6) - *(dst2 + offset + 6)) + abs(*(dst1 + offset + 7) - *(dst2 + offset + 7)); r0 = a + b + c + e; a = abs(*(dst1 + offset + 8) - *(dst2 + offset + 8)) + abs(*(dst1 + offset + 9) - *(dst2 + offset + 9)); b = abs(*(dst1 + offset + 10) - *(dst2 + offset + 10)) + abs(*(dst1 + offset + 11) - *(dst2 + offset + 11)); c = abs(*(dst1 + offset + 12) - *(dst2 + offset + 12)) + abs(*(dst1 + offset + 13) - *(dst2 + offset + 13)); e = abs(*(dst1 + offset + 14) - *(dst2 + offset + 14)) + abs(*(dst1 + offset + 15) - *(dst2 + offset + 15)); r4 = a + b + c + e; return r0 + r4; } __device__ \ uint32_t getAddressOffsetGrid1(const int32_t& x, const int32_t& y, \ const int32_t& d, const int32_t& width, const int32_t& disp_num) { return (y*width + x)*disp_num + d; } __device__ \ void updatePosteriorMinimumNew(unsigned char* dst1, unsigned char* dst2, const int32_t &d, int32_t &val, int32_t &min_val, int32_t &min_d) { val = computeMatchEnergy1(dst1, dst2, 0); if (val<min_val) { min_val = val; min_d = d; } } __device__ \ void updatePosteriorMinimumNew1(unsigned char* dst1, unsigned char* dst2, const int32_t &d, const int8_t w, int32_t &val, int32_t &min_val, int32_t &min_d) { val = computeMatchEnergy1(dst1, dst2, 0) + w; if (val<min_val) { min_val = val; min_d = d; } } int iDivUp(int a, int b) { return ((a % b) != 0) ? 
(a / b + 1) : (a / b); } __device__ \ unsigned int computeMatchEnergy1_new(unsigned char* dst1_1, unsigned char* dst1_2, unsigned char* dst2_1, unsigned char* dst2_2, int32_t u, int32_t u_wrap) { unsigned int r0, r1, r2, r3; r0 = 0; r1 = 0; r2 = 0; r3 = 0; #pragma unroll for (int i = 0; i < 16; i++) { r0 += abs(dst2_1[(u_wrap << 4) - 32 + i] - dst1_1[(u << 4) - 32 + i]); r1 += abs(dst2_1[(u_wrap << 4) + 32 + i] - dst1_1[(u << 4) + 32 + i]); r2 += abs(dst2_2[(u_wrap << 4) - 32 + i] - dst1_2[(u << 4) - 32 + i]); r3 += abs(dst2_2[(u_wrap << 4) + 32 + i] - dst1_2[(u << 4) + 32 + i]); } return r0 + r1 + r2 + r3; } #define D_candidate_stepsize 5 #define INCON_THRESHOLD 5 #define INCON_MIN_SUPPORT 5 #define INCON_WINDOW_SIZE 5 #define SUPPORT_TEXTURE 10 #define DISP_MIN 0 #define DISP_MAX 63 #define SUPPORT_THRESHOLD 0.85 #define U_STEP 2 #define V_STEP 2 #define WINDOW_SIZE 3 #define MIN_1_E 32767 #define MIN_1_D -1 #define MIN_2_E 32767 #define MIN_2_D -1 #define DESC_OFFSET_1 (-16 * U_STEP) #define DESC_OFFSET_2 (+16 * U_STEP) #define DESC_OFFSET_3 (-16 * U_STEP) #define DESC_OFFSET_4 (+16 * U_STEP) #define BLOCKX 60 #define BLOCKY 1 #define GRIDX 1 #define GRIDY 46 //#define GRIDY 2 //dim3 threads(60, 1); //dim3 grid(1, 46); __constant__ uint32_t oneLine = WIDTH * 16; __global__ void compEner_gpu(uint8_t* I1_desc_shared, uint8_t* I2_desc_shared, int u, int u_wrap, uint32_t* sumResult) { int x = threadIdx.x; // x = (0,15) int32_t sum = 0; sum = abs(I1_desc_shared[(u - 2) << 4 + x ] - I2_desc_shared[(u_wrap - 2) << 4 + x]); sum += abs(I1_desc_shared[(u + 2) << 4 + x ] - I2_desc_shared[(u_wrap + 2) << 4 + x]); sum += abs(I1_desc_shared[(u + 2) << 4 + x + oneLine] - I2_desc_shared[(u_wrap + 2) << 4 + x +oneLine]); sum += abs(I1_desc_shared[(u - 2) << 4 + x + oneLine] - I2_desc_shared[(u_wrap - 2) << 4 + x +oneLine]); sumResult[x] = sum; } __global__ void sptMathKernel(int32_t D_can_width, int32_t D_can_height, int8_t* D_can, uint8_t* desc1, uint8_t* desc2) { int32_t u_wrap; int disp_max_valid; int result1 = 0, result2 = 0, result3 = 0, result4 = 0; int32_t line_offset; uint8_t *I1_line_addr, *I2_line_addr, *I1_block_addr, *I2_block_addr, *I_line_addr_tmp; uint8_t *I1_block_addr_1, *I1_block_addr_2, *I2_block_addr_1, *I2_block_addr_2; int32_t sum = 0; int16_t min_1_E; int16_t min_1_d; int16_t min_2_E; int16_t min_2_d; int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int u, v, d1 = -1 , d2 = -1; u = (x + 3) * D_candidate_stepsize; //5 v = (y + 1) * D_candidate_stepsize; line_offset = 16 * WIDTH*v; I1_line_addr = desc1 + line_offset; I2_line_addr = desc2 + line_offset; __shared__ uint8_t I1_desc_shared[320 * 16 * 2]; __shared__ uint8_t I2_desc_shared[320 * 16 * 2]; for(int i = 0; i < 85; i++){ I1_desc_shared[x + i * BLOCKX ] = *(I1_line_addr + x + i * BLOCKX - 2 * oneLine); I1_desc_shared[x + i * BLOCKX + oneLine] = *(I1_line_addr + x + i * BLOCKX + 2 * oneLine); I2_desc_shared[x + i * BLOCKX ] = *(I2_line_addr + x + i * BLOCKX - 2 * oneLine); I2_desc_shared[x + i * BLOCKX + oneLine] = *(I2_line_addr + x + i * BLOCKX + 2 * oneLine); } __syncthreads(); for (int32_t i=0; i<16; i++) sum += abs((int32_t)(*(I1_line_addr + 16 * u +i))-128); if (sum<10){ d1 = -1; return; } I1_block_addr_1 = I1_desc_shared + 16 * u ; I1_block_addr_2 = I1_desc_shared + 16 * u + oneLine ; disp_max_valid = min(63, u - 5); min_1_E = MIN_1_E; min_1_d = MIN_1_D; min_2_E = MIN_2_E; min_2_d = MIN_2_D; for (int16_t d = 0; d <= disp_max_valid; d++) { u_wrap = u - d; I2_block_addr_1 = 
I2_desc_shared + 16 * u_wrap; I2_block_addr_2 = I2_desc_shared + 16 * u_wrap + oneLine; // result1 = computeMatchEnergy1(I1_block_addr_1, I2_block_addr_1, DESC_OFFSET_1); // result2 = computeMatchEnergy1(I1_block_addr_1, I2_block_addr_1, DESC_OFFSET_2); // result3 = computeMatchEnergy1(I1_block_addr_2, I2_block_addr_2, DESC_OFFSET_3); result4 = computeMatchEnergy1(I1_block_addr_2, I2_block_addr_2, DESC_OFFSET_4); // sum = result1 + result2 + result3 + result4; sum = result4; if (sum<min_1_E) { min_2_E = min_1_E; min_2_d = min_1_d; min_1_E = sum; min_1_d = d; } else if (sum<min_2_E) { min_2_E = sum; min_2_d = d; } } if (min_1_d>=0 && min_2_d>=0 && (float)min_1_E < 0.85*(float)min_2_E) d1 = min_1_d; sum = 0; if (d1 >= 0){ min_1_E = MIN_1_E; min_1_d = MIN_1_D; min_2_E = MIN_2_E; min_2_d = MIN_2_D; u = u - d1; disp_max_valid = min(63, WIDTH - u - 5); I2_block_addr_1 = I2_desc_shared + 16 * u; I2_block_addr_2 = I2_desc_shared + 16 * u + 320 * 16; sum = 0; #pragma unroll for (int32_t i=0; i<16; i++) sum += abs((int32_t)(*(I2_line_addr+i))-128); if (sum<10){ return; } sum = 0; for(int16_t d = 0; d <= disp_max_valid; d++){ u_wrap = u + d; I1_block_addr_1 = I1_desc_shared + 16 * u_wrap; I1_block_addr_2 = I1_desc_shared + 16 * u_wrap + 320 * 16; // result1 = computeMatchEnergy1(I1_block_addr_1, I2_block_addr_1, DESC_OFFSET_1); // result2 = computeMatchEnergy1(I1_block_addr_1, I2_block_addr_1, DESC_OFFSET_2); // result3 = computeMatchEnergy1(I1_block_addr_2, I2_block_addr_2, DESC_OFFSET_3); result4 = computeMatchEnergy1(I1_block_addr_2, I2_block_addr_2, DESC_OFFSET_4); // sum = result1 + result2 + result3 + result4; sum = result4; // sum = computeMatchEnergy1_new(I2_desc_shared, I2_desc_shared + oneLine, I1_desc_shared, I1_desc_shared + oneLine, u, u_wrap); if (sum<min_1_E) { min_2_E = min_1_E; min_2_d = min_1_d; min_1_E = sum; min_1_d = d; } else if (sum<min_2_E) { min_2_E = sum; min_2_d = d; } } if (min_1_d>=0 && min_2_d>=0 && (float)min_1_E < 0.85*(float)min_2_E) d2 = min_1_d; if( d2 >= 0 && abs(d2 - d1) <= 2 ) D_can[x + y * D_can_width] = d1; } } void addCornerSupportPoints(vector<Elas::support_pt> &p_support, int32_t width, int32_t height) { // list of border points vector<Elas::support_pt> p_border; p_border.push_back(Elas::support_pt(0, 0, 0)); p_border.push_back(Elas::support_pt(0, height - 1, 0)); p_border.push_back(Elas::support_pt(width - 1, 0, 0)); p_border.push_back(Elas::support_pt(width - 1, height - 1, 0)); // find closest d for (int32_t i = 0; i<p_border.size(); i++) { int32_t best_dist = 10000000; for (int32_t j = 0; j<p_support.size(); j++) { int32_t du = p_border[i].u - p_support[j].u; int32_t dv = p_border[i].v - p_support[j].v; int32_t curr_dist = du*du + dv*dv; if (curr_dist<best_dist) { best_dist = curr_dist; p_border[i].d = p_support[j].d; } } } // for right image p_border.push_back(Elas::support_pt(p_border[2].u + p_border[2].d, p_border[2].v, p_border[2].d)); p_border.push_back(Elas::support_pt(p_border[3].u + p_border[3].d, p_border[3].v, p_border[3].d)); // add border points to support points for (int32_t i = 0; i<p_border.size(); i++) p_support.push_back(p_border[i]); } __global__ void removeInconsistentSupportPoints1(int16_t* D_can, int32_t D_can_width, int32_t D_can_height) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int u, v; if (x < D_can_width && y < D_can_height) { int16_t d_can = *(D_can + getAddressOffsetImage1(x, y, D_can_width)); if (d_can >= 0) { int32_t support = 0; for (int32_t u_can_2 = x - 
INCON_WINDOW_SIZE; u_can_2 <= x + INCON_WINDOW_SIZE; u_can_2++) { for (int32_t v_can_2 = y - INCON_WINDOW_SIZE; v_can_2 <= y + INCON_WINDOW_SIZE; v_can_2++) { if (u_can_2 >= 0 && v_can_2 >= 0 && u_can_2<D_can_width && v_can_2<D_can_height) { int16_t d_can_2 = *(D_can + getAddressOffsetImage1(u_can_2, v_can_2, D_can_width)); if (d_can_2 >= 0 && abs(d_can - d_can_2) <= INCON_THRESHOLD) support++; } } } // invalidate support point if number of supporting points is too low if (support<INCON_MIN_SUPPORT) *(D_can + getAddressOffsetImage1(x, y, D_can_width)) = -1; } } } __global__ void removeRedundantSupportPoints1(int16_t* D_can, int32_t D_can_width, int32_t D_can_height, int32_t redun_max_dist, int32_t redun_threshold, bool vertical) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if (x < D_can_width && y < D_can_height) { // parameters int32_t redun_dir_u[2] = { 0,0 }; int32_t redun_dir_v[2] = { 0,0 }; if (vertical) { redun_dir_v[0] = -1; redun_dir_v[1] = +1; } else { redun_dir_u[0] = -1; redun_dir_u[1] = +1; } int16_t d_can = *(D_can + getAddressOffsetImage1(x, y, D_can_width)); if (d_can >= 0) { // check all directions for redundancy bool redundant = true; for (int32_t i = 0; i<2; i++) { // search for support int32_t u_can_2 = x; int32_t v_can_2 = y; int16_t d_can_2; bool support = false; for (int32_t j = 0; j<redun_max_dist; j++) { u_can_2 += redun_dir_u[i]; v_can_2 += redun_dir_v[i]; if (u_can_2<0 || v_can_2<0 || u_can_2 >= D_can_width || v_can_2 >= D_can_height) break; d_can_2 = *(D_can + getAddressOffsetImage1(u_can_2, v_can_2, D_can_width)); if (d_can_2 >= 0 && abs(d_can - d_can_2) <= redun_threshold) { support = true; break; } } // if we have no support => point is not redundant if (!support) { redundant = false; break; } } // invalidate support point if it is redundant if (redundant) *(D_can + getAddressOffsetImage1(x, y, D_can_width)) = -1; } } } vector<Elas::support_pt> computeSupportMatches_g(uint8_t* I_desc1, uint8_t* I_desc2, \ int8_t* D_sup_c, int8_t* D_sup_g) { // create matrix for saving disparity candidates int32_t D_can_width = 60; //[15,310] => 60 int32_t D_can_height = 48; //[5, 230] => 46 gpuErrchk(hipMemset(D_sup_g, -1, D_can_width*D_can_height * sizeof(int8_t))); dim3 threads(BLOCKX, BLOCKY); dim3 grid(GRIDX, GRIDY); gpuErrchk(hipFuncSetCacheConfig(sptMathKernel,hipFuncCachePreferShared)); //compute support // hipDeviceSynchronize(); // sptMathKernel << <grid, threads, 0 >> > (D_can_width, D_can_height, D_sup_g, I_desc1, I_desc2); //hipDeviceSynchronize(); //13ms gpuErrchk(hipDeviceSynchronize()); //put D_sup to vector of support vector<Elas::support_pt> p_support; for (int32_t v_can = 0; v_can<D_can_height; v_can++) for (int32_t u_can = 0; u_can<D_can_width; u_can++) if (*(D_sup_c + u_can + v_can * D_can_width) >= 0) p_support.push_back(Elas::support_pt((u_can + 3)*D_candidate_stepsize, (v_can + 1) * D_candidate_stepsize, *(D_sup_c+ u_can + v_can * D_can_width))); return p_support; } /** * 3. over: compute support point * */ /** * 4. convert dis to cloud ***/ __global__ void Convert(float *D_g, float *cloud_g) { int u = blockDim.x * blockIdx.x + threadIdx.x; int v = blockDim.y * blockIdx.y + threadIdx.y; v += 20; float w = 0, x = 0, y = 0, z = 0; if(u + v * WIDTH > 320 * 240) printf("\n+++++++++++++ id3 out %d\n", u + v * WIDTH); float dis = D_g[u + v * WIDTH]; w = 0.006669723997311648 * dis; if(w == 0 ){ // printf("bug. 
w == 0\n"); w = 0.00000001; } x = (float)((u - 161.2100334167481) / w); y = (float)(- (v - 119.9240913391113) / w); //has bug z = (float)(241.57918 / w); *(cloud_g + (u + v * WIDTH) * 3) = x; *(cloud_g + (u + v * WIDTH) * 3 + 1) = y; *(cloud_g + (u + v * WIDTH) * 3 + 2) = z; if(dis == -1) { *(cloud_g + (u + v * WIDTH) * 3) = 10000; *(cloud_g + (u + v * WIDTH) * 3 + 1) = 10000; *(cloud_g + (u + v * WIDTH) * 3 + 2) = 10000; } // *(cloud_g + (u + v * WIDTH) * 3) = z; // *(cloud_g + (u + v * WIDTH) * 3 + 1) = y; // *(cloud_g + (u + v * WIDTH) * 3 + 2) = x; // A = [u, v, d,1]; // Q = [1, 0, 0, -161.2100334167481; 0, 1, 0, -119.9240913391113; // 0, 0, 0, 241.57918; 0, 0, 0.006669723997311648, 0] } int ConvertD2Z(float* D1_g, float* cloud_g) { dim3 threads(320, 1); dim3 grid(1, 200); // printf("conv\n"); hipLaunchKernelGGL(( Convert), dim3(grid), dim3(threads), 0, 0, D1_g, cloud_g); // printf("conv2\n"); // hipDeviceSynchronize(); } /** * 4. over: convert dis to cloud ***/ /**** * 5. cuda_computeD ***/ int tri_size = 0; __constant__ int32_t grid_dims_g[3] = {65, 16, 12} ; __constant__ int8_t temp[] = {-14,-9,-2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; __global__ void Triangle_Match1(Elas::triangle* tri, int32_t* disparity_grid,\ uint8_t* I1_desc, uint8_t* I2_desc, int8_t* P, \ int32_t plane_radius, bool right_image, float* D, \ uint32_t* tp, int tri_size) { float plane_a = 0, plane_b = 0, plane_c = 0, plane_d = 0; int u = blockDim.x * blockIdx.x + threadIdx.x; int v = blockDim.y * blockIdx.y + threadIdx.y ; uint32_t id; __shared__ uint8_t __I1_desc_share[320 * 16]; __shared__ uint8_t __I2_desc_share[320 * 16]; // if(u == 245 && v == 5){ // printf("aa:\n"); // } if(u >= 320) printf("\n+++++++++++++ u out %d\n", u); if(v >= 240) printf("\n+++++++++++++ v out %d\n", v); for(int i = 0; i < 16; i += 1 ) { __I1_desc_share[u + i*320] = I1_desc[v * 320*16 + u + i*320]; __I2_desc_share[u + i*320] = I2_desc[v * 320*16 + u + i*320]; } __syncthreads(); id = tp[u + v * WIDTH]; if(u + v * WIDTH > 320 * 240) printf("\n+++++++++++++ id1 out %d\n", u + v * WIDTH); if(id > tri_size) printf("\n+++++++++++++ id2 out %d,%d\n", id, tri_size); plane_a = tri[id].t1a; plane_b = tri[id].t1b; plane_c = tri[id].t1c; plane_d = tri[id].t2a; bool valid = fabs(plane_a)<0.7 && fabs(plane_d)<0.7; const int32_t window_size = 2; // address of disparity we want to compute uint32_t d_addr; d_addr = getAddressOffsetImage1(u, v, WIDTH); if(d_addr > 320 * 240) printf("+++++++++d_addr out %d\n", d_addr); uint8_t *I1_line_addr, *I2_line_addr; I2_line_addr = __I2_desc_share ; uint8_t* I1_block_addr = __I1_desc_share + 16 * u; // does this patch have enough texture? 
int32_t sum = 0; //int32_t match_texture = 1; // //#pragma unroll // for (int32_t i = 0; i<16; i++) // sum += abs((int32_t)(*(I1_block_addr + i)) - 127); // if (sum<match_texture) // return; // compute disparity, min disparity and max disparity of plane prior int32_t d_plane = (int32_t)(plane_a*(float)u + plane_b*(float)v + plane_c); // int32_t d_plane = (int32_t)(0); int32_t d_plane_min = max(d_plane - plane_radius, 0); int32_t d_plane_max = min(d_plane + plane_radius, grid_dims_g[0] - 2); // get grid pointer int32_t grid_x = (int32_t)floor((float)u / (float)GRID_SIZE); int32_t grid_y = (int32_t)floor((float)v / (float)GRID_SIZE); //(gird_y * 16 + grid_x) * 65 // uint32_t grid_addr = (grid_y * grid_dims_g[1] + grid_x ) * grid_dims_g[0] + 0; //getAddressOffsetGrid1(grid_x, grid_y, 0, grid_dims_g[1], grid_dims_g[0]); uint32_t grid_addr = getAddressOffsetGrid1(grid_x, grid_y, 0, grid_dims_g[1], grid_dims_g[0]); // uint32_t grid_addr = (grid_y * 16 + grid_x ) * 65 + 0; //getAddressOffsetGrid1(grid_x, grid_y, 0, grid_dims_g[1], grid_dims_g[0]); if( (grid_addr + 1) > 65 * 12 * 16){ printf("++++++++++ grid_addr out %d, %d, %d, %d\n", grid_x, grid_y, grid_dims_g[1], grid_dims_g[0]); } int32_t num_grid = *(disparity_grid + grid_addr); if( num_grid > 64 ) printf("++++++++++ num_grid out %d\n", num_grid); int32_t* d_grid = disparity_grid + grid_addr + 1; // loop variables int32_t d_curr, u_warp, val; int32_t min_val = 10000; int32_t min_d = -1; // if(u == 245 && v == 5){ // printf("aa:%d, %d, %d\n", num_grid, d_plane_min, d_plane_max); // } // left image if (!right_image) { for (int32_t i = 0; i<num_grid; i++) { d_curr = d_grid[i]; // printf("u=%d, %d\n", u, d_curr); if (d_curr<d_plane_min || d_curr>d_plane_max) { u_warp = u - d_curr; if (u_warp<window_size || u_warp >= WIDTH - window_size) continue; if(u_warp < 0 || u_warp > 320) printf("_+++++++++ u_wrap1 out %d\n", u_warp); updatePosteriorMinimumNew(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, val, min_val, min_d); } } // if(v > 0 && v < 6 && u >0 && u < 20) // printf("2u=%d, %d, %d, %d, %d\n", u, v, d_curr, d_plane_min, d_plane_max); // int tmp; // for (tmp = d_plane_min; tmp <= d_plane_max; tmp++) { // tmp++; // updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * tmp, tmp, valid ? *(temp + abs(tmp - d_plane)) : 0, val, min_val, min_d); // } for (d_curr = d_plane_min; d_curr <= d_plane_max; d_curr++) { u_warp = u - d_curr; // if(v > 0 && v < 6 && u >0 && u < 20) // printf("1u=%d, %d, %d, %d, %d\n", u, v, d_curr, d_plane_min, d_plane_max); if (u_warp<window_size || u_warp >= WIDTH - window_size) continue; if(u_warp < 0 || u_warp > 320) printf("_+++++++++ u_wrap2 out %d\n", u_warp); updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, valid ? 
*(temp + abs(d_curr - d_plane)) : 0, val, min_val, min_d); } } else { #pragma unroll for (int32_t i = 0; i<num_grid; i++) { d_curr = d_grid[i]; if (d_curr<d_plane_min || d_curr>d_plane_max) { u_warp = u + d_curr; if(u_warp < 0 || u_warp > 320){ // printf("_+++++++++ u_wrap3 out %d\n", u_warp); continue; } if (u_warp<window_size || u_warp >= WIDTH - window_size) continue; updatePosteriorMinimumNew(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, val, min_val, min_d); } } #pragma unroll for (d_curr = d_plane_min; d_curr < (d_plane_max + 1); d_curr++) { u_warp = u + d_curr; if(u_warp < 0 || u_warp > 320){ // printf("_+++++++++ u_wrap4 out %d,%d,%d,%d,%d\n", u, d_curr, u_warp, d_plane_min, d_plane_max); continue; } if (u_warp<window_size || u_warp >= WIDTH - window_size){ // printf("_+++++++++ u_wrap5 out\n"); continue; } updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, valid ? *(temp + abs(d_curr - d_plane)) : 0, val, min_val, min_d); } } // set disparity value // if (min_d >= 0){ if (min_d > 0){ *(D + d_addr) = (float)min_d; // MAP value (min neg-Log probability) }else *(D + d_addr) = -1; // invalid disparity } void computeTrianglePoints(const vector<Elas::support_pt> &p_support, const vector<Elas::triangle> &tri, \ bool right_image, int32_t width, int32_t TRI_SIZE, uint32_t* tp) { // loop variables int32_t c1, c2, c3; // float plane_a, plane_b, plane_c, plane_d; memset(tp, 0, WIDTH * HEIGH* sizeof(uint32_t)); // for all triangles do for (uint32_t i = 0; i<TRI_SIZE; i++) { int num = 0; // get plane parameters uint32_t p_i = i * 3; // triangle corners c1 = tri[i].c1; c2 = tri[i].c2; c3 = tri[i].c3; // sort triangle corners wrt. u (ascending) float tri_u[3]; if (!right_image) { // tri_u[0] = p_support[c1].u; tri_u[1] = p_support[c2].u; tri_u[2] = p_support[c3].u; } else { // tri_u[0] = p_support[c1].u - p_support[c1].d; tri_u[1] = p_support[c2].u - p_support[c2].d; tri_u[2] = p_support[c3].u - p_support[c3].d; } float tri_v[3] = { p_support[c1].v,p_support[c2].v,p_support[c3].v }; for (uint32_t j = 0; j<3; j++) { for (uint32_t k = 0; k<j; k++) { if (tri_u[k]>tri_u[j]) { float tri_u_temp = tri_u[j]; tri_u[j] = tri_u[k]; tri_u[k] = tri_u_temp; float tri_v_temp = tri_v[j]; tri_v[j] = tri_v[k]; tri_v[k] = tri_v_temp; } } } // rename corners float A_u = tri_u[0]; float A_v = tri_v[0]; float B_u = tri_u[1]; float B_v = tri_v[1]; float C_u = tri_u[2]; float C_v = tri_v[2]; // compute straight lines connecting triangle corners float AB_a = 0; float AC_a = 0; float BC_a = 0; if ((int32_t)(A_u) != (int32_t)(B_u)) AB_a = (A_v - B_v) / (A_u - B_u); if ((int32_t)(A_u) != (int32_t)(C_u)) AC_a = (A_v - C_v) / (A_u - C_u); if ((int32_t)(B_u) != (int32_t)(C_u)) BC_a = (B_v - C_v) / (B_u - C_u); float AB_b = A_v - AB_a*A_u; float AC_b = A_v - AC_a*A_u; float BC_b = B_v - BC_a*B_u; // first part (triangle corner A->B) if ((int32_t)(A_u) != (int32_t)(B_u)) { for (int32_t u = max((int32_t)A_u, 0); u < min((int32_t)B_u, width); u++) { int32_t v_1 = (uint32_t)(AC_a*(float)u + AC_b); int32_t v_2 = (uint32_t)(AB_a*(float)u + AB_b); for (int32_t v = min(v_1, v_2); v < max(v_1, v_2); v++){ // *((int16_t*)(tp + 2 * u + v * 2 * width)) = u; // *((int16_t*)(tp + 2 * u + v * 2 * width) + 1) = v; // *(tp + 2 * u + v * 2 * width + 1) = i; if(u + v * width > 320 * 240) { printf("hhh\n"); while(1); } *(tp + u + v * width ) = i; // num++; } } } // second part (triangle corner B->C) if ((int32_t)(B_u) != (int32_t)(C_u)) { for (int32_t u = max((int32_t)B_u, 0); u < min((int32_t)C_u, width); u++) { if 
(!param.subsampling || u % 2 == 0) { int32_t v_1 = (uint32_t)(AC_a*(float)u + AC_b); int32_t v_2 = (uint32_t)(BC_a*(float)u + BC_b); for (int32_t v = min(v_1, v_2); v < max(v_1, v_2); v++) if (!param.subsampling || v % 2 == 0) { // *((int16_t*)(tp + 2 * u + v * 2 * width)) = u; // *((int16_t*)(tp + 2 * u + v * 2 * width) + 1) = v; // *(tp + 2 * u + v * 2 * width + 1) = i; if(u + v * width > 320 * 240) { printf("hhh2\n"); while(1); } *(tp + u + v * width) = i; // num++; } } } } // tri[i].pointNum = num; } } hipError_t err; int32_t dims[3] = {WIDTH, HEIGH, WIDTH}; void cuda_computeD(int32_t* disparity_grid_1, int32_t* disparity_grid_2, vector<Elas::support_pt> &p_support, \ vector<Elas::triangle> &tri_1, vector<Elas::triangle> &tri_2, \ float* D1, float* D2, uint8_t* I1, uint8_t* I2, int8_t* P_g,\ uint32_t *tp1_g, uint32_t* tp2_g, uint32_t* tp1_c, uint32_t* tp2_c, float* cloud_g) { int32_t width, height, bpl; clock_t t1, t2; // get width, height and bytes per line width = dims[0]; // height = dims[1]; bpl = dims[2]; // // allocate memory for disparity grid int32_t grid_width = 16; //(int32_t)ceil((float)width / (float)20); int32_t grid_height = 12; //(int32_t)ceil((float)height / (float)20); int32_t grid_dims[3] = { 63 + 2,grid_width,grid_height }; int32_t P_SUPPORT_SIZE = p_support.size(); int32_t TRI_SIZE1 = tri_1.size(); int32_t TRI_SIZE2 = tri_2.size(); tri_size = TRI_SIZE1; // int8_t* tp1_cpu = tp1_c; // int8_t* tp2_cpu = tp2_c; // int8_t *tp1_gpu = tp1_g; // int8_t *tp2_gpu = tp2_g; computeTrianglePoints(p_support, tri_1, 0, width, TRI_SIZE1, tp1_c); computeTrianglePoints(p_support, tri_2, 1, width, TRI_SIZE2, tp2_c); Elas::triangle* tri_gpu_1, *tri_gpu_2; //int32_t *P_gpu = NULL; //action:::: cannot delete; hipMalloc((void **)&tri_gpu_1, sizeof(Elas::triangle) * TRI_SIZE1); hipMalloc((void **)&tri_gpu_2, sizeof(Elas::triangle) * TRI_SIZE2); // hipMalloc((void **)&P_gpu, sizeof(int8_t) * 64); err = hipGetLastError(); if(0 != err) printf("cuda error: %s\n", hipGetErrorString(err)); // printf("copy\n"); hipMemcpy(tri_gpu_1, &tri_1[0], sizeof(Elas::triangle) * TRI_SIZE1, hipMemcpyHostToDevice); hipMemcpy(tri_gpu_2, &tri_2[0], sizeof(Elas::triangle) * TRI_SIZE2, hipMemcpyHostToDevice); //hipMemcpy(P_gpu, P_g, sizeof(int8_t) * 64, hipMemcpyHostToDevice); err = hipGetLastError(); if(0 != err) printf("cuda error: %s\n", hipGetErrorString(err)); int32_t plane_radius = 2; //(int32_t)max((float)ceil(param.sigma*param.sradius), (float)2.0); // dim3 threads(280, 1); dim3 threads(320, 1); dim3 grid(1, 240); // hipDeviceSynchronize(); err = hipGetLastError(); if(0 != err) printf("cuda error: %s\n", hipGetErrorString(err)); // printf("going Triangle_match kernel\n"); // printf("tri_size1,2 = %d, %d\n", TRI_SIZE1, TRI_SIZE2); Triangle_Match1 << <grid, threads, 0>> > (tri_gpu_1, disparity_grid_1, \ I1, I2, P_g, plane_radius, 0, D1, tp1_g, tri_size); tri_size = TRI_SIZE2; Triangle_Match1 << <grid, threads, 0>> > (tri_gpu_2, disparity_grid_2, \ I2, I1, P_g, plane_radius, 1, D2, tp2_g, tri_size); err = hipGetLastError(); if(0 != err) printf("Triangle_Match1 cuda error: %s\n", hipGetErrorString(err)); // gpuErrchk(hipDeviceSynchronize()); //if(0 != err) printf("Triangle_Match1 cuda error: %s\n", hipGetErrorString(err)); // printf("leftRightConsistency\n"); hipLaunchKernelGGL(( leftRightConsistencyCheck), dim3(grid), dim3(threads), 0, 0, D1, D2); // hipDeviceSynchronize(); dim3 threads2(320, 1); dim3 grid2(1, 200); hipLaunchKernelGGL(( Convert), dim3(grid2), dim3(threads2), 0, 0, D1, cloud_g); 
gpuErrchk(err); hipFree((void*)tri_gpu_1); hipFree((void*)tri_gpu_2); // hipFree((void*)P_gpu); err =hipDeviceSynchronize(); gpuErrchk(err); } /**** * 5. over: cuda_computeD ***/ /*** * 6. leftRightConsistencyCheck * */ __global__ void leftRightConsistencyCheck(float* D1_g, float* D2_g) { int u = blockDim.x * blockIdx.x + threadIdx.x; int v = blockDim.y * blockIdx.y + threadIdx.y; // __shared__ float I_du_share[320]; // __shared__ float I_dv_share[320]; uint32_t addr = v * WIDTH + u; float d1 = *(D1_g + addr); float d2 = *(D2_g + addr); float u_warp_1 = u - d1; float u_warp_2 = u + d2; if(d1 >= 0 && u_warp_1 >= 0 && u_warp_1 < WIDTH) { uint32_t addr_warp = v * WIDTH + (int32_t)u_warp_1; if(fabs(*(D2_g + addr_warp) - d1) > 2 ) //|| (*(D2_g + addr_warp) - d1) < -2) *(D1_g + addr) = -1; }else *(D1_g + addr) = -1; } /*** * 6. leftRightConsistencyCheck * */
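A short, self-contained sketch (not taken from the sources above or below) of the two kernel-launch spellings that the hipified file above mixes: hipify rewrote most launches into hipLaunchKernelGGL, but the triple-chevron form was left in place for Triangle_Match1, and hipcc accepts both. The kernel and buffer here are stand-ins, not part of the original code.

#include <hip/hip_runtime.h>

__global__ void dummyKernel(float *out)   //illustrative kernel; out is assumed to hold at least 314*234 floats
{
    out[threadIdx.x + blockIdx.y * blockDim.x] = 0.0f;
}

void launchBothWays(float *d_out)
{
    dim3 threads(314, 1);
    dim3 grid(1, 234);
    //Macro form emitted by hipify: grid, block, shared-memory bytes and stream come before the kernel arguments
    hipLaunchKernelGGL(dummyKernel, grid, threads, 0, 0, d_out);
    //Triple-chevron form, still valid under hipcc
    dummyKernel<<<grid, threads, 0, 0>>>(d_out);
}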
5a6ae78f613c433bda76d8ac63a6a5d475e75a9f.cu
#include "cuda_runtime.h" #include "device_launch_parameters.h" #include <stdio.h> #include <time.h> //#include "cv.h" //#include "highgui.h" #include "elas.h" #include <vector> #include "triangle.h" #include "matrix.h" #include <stdlib.h> using namespace std; /*** * 1. cudaHostAlloc and cudaFreeHost wrap * 2. create Descriptor of two img * 3. compute support point * 4. convert dis to cloud * 5. cuda_computeD * 6. leftRightConsistencyCheck * */ #define gpuErrchk(ans) { gpuAssert((ans), __FILE__, __LINE__);} inline void gpuAssert(cudaError_t code, const char *file, int line, bool abort = true) { if(code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if(abort) exit(code); } } #define WIDTH 320 #define HEIGH 240 #define GRID_SIZE 20 enum setting { ROBOTICS, MIDDLEBURY }; // parameter set Elas::parameters param; struct plane { float t1a, t1b, t1c; float t2a; }; __global__ void leftRightConsistencyCheck(float* D1_g, float* D2_g); /** * 1. cudaHostAlloc and cudaFreeHost wrap * * */ static int count_alloc = 0; static int count_free = 0; void cudaFreeHost_cpuaa(void *p) { count_free++; cudaFreeHost(p); cudaError_t err = cudaGetLastError(); if(0 != err) printf("cuda error: %s\n", cudaGetErrorString(err)); } void SetDeviceMap() { cudaSetDeviceFlags(cudaDeviceMapHost); } void* HostMal(void **p, long size) { count_alloc++; void *p_g; // cudaHostAlloc((void**)p,size, cudaHostAllocDefault | cudaHostAllocMapped); cudaHostAlloc((void**)p, size, cudaHostAllocDefault ); cudaError_t err = cudaGetLastError(); if(0 != err) printf("cuda error: %s\n", cudaGetErrorString(err)); //将常规的主机指针转换成指向设备内存空间的指针 cudaHostGetDevicePointer(&p_g, *p, 0); err = cudaGetLastError(); if(0 != err) printf("cuda error: %s\n", cudaGetErrorString(err)); return p_g; } void allocFreeCount() { printf("count_alloc = %d\n", count_alloc); printf("count_free= %d\n", count_free); } /** * 1. over: cudaHostAlloc and cudaFreeHost wrap * */ //dim3 threads(320 - 6 , 1); //dim3 grid( 1, 240 -6 ); => (0, 233) /** * 2. 
create Descriptor of two img * */ __global__ \ void createDesc_gpu_kernel(uint8_t* I_desc, uint8_t* I_du, uint8_t* I_dv) { int u = blockDim.x * blockIdx.x + threadIdx.x; int v = blockDim.y * blockIdx.y + threadIdx.y; int x = u + 3; int y = v + 3; __shared__ uint8_t I_du_share[320 * 5]; __shared__ uint8_t I_dv_share[320 * 3]; uint8_t *I_desc_curr; for(int i = 0; i < 5; i++){ *(I_du_share + x + i * 320) = *(I_du + x + (y-2 + i) * 320); } for(int i = 0; i < 3; i++){ *(I_dv_share + x + i * 320) = *(I_dv + x + (y-1 + i) * 320); } __syncthreads(); I_desc_curr = I_desc + (y* WIDTH + x) * 16; *(I_desc_curr++) = *(I_du_share + (0 * WIDTH + x + 0) ); *(I_desc_curr++) = *(I_du_share + (1 * WIDTH + x - 2) ); *(I_desc_curr++) = *(I_du_share + (1 * WIDTH + x + 0) ); *(I_desc_curr++) = *(I_du_share + (1 * WIDTH + x + 2) ); *(I_desc_curr++) = *(I_du_share + (2 * WIDTH + x - 1) ); *(I_desc_curr++) = *(I_du_share + (2 * WIDTH + x + 0) ); *(I_desc_curr++) = *(I_du_share + (2 * WIDTH + x + 0) ); *(I_desc_curr++) = *(I_du_share + (2 * WIDTH + x + 2) ); *(I_desc_curr++) = *(I_du_share + (3 * WIDTH + x - 2) ); *(I_desc_curr++) = *(I_du_share + (3 * WIDTH + x + 0) ); *(I_desc_curr++) = *(I_du_share + (3 * WIDTH + x + 2) ); *(I_desc_curr++) = *(I_du_share + (4 * WIDTH + x + 0) ); *(I_desc_curr++) = *(I_dv_share + (0 * WIDTH + x + 0) ); *(I_desc_curr++) = *(I_dv_share + (1 * WIDTH + x + 1) ); *(I_desc_curr++) = *(I_dv_share + (1 * WIDTH + x - 1) ); *(I_desc_curr++) = *(I_dv_share + (2 * WIDTH + x + 0) ); } int __createDesc_gpu(uint8_t* I_desc, uint8_t* I_du_g, uint8_t* I_dv_g ) { dim3 threads(WIDTH - 6 , 1); dim3 grid( 1, HEIGH - 6 ); // cudaDeviceSynchronize(); createDesc_gpu_kernel<<<grid, threads, 0 >>>(I_desc, I_du_g, I_dv_g ); gpuErrchk(cudaDeviceSynchronize()); // cudaError_t err = cudaGetLastError(); // printf("cuda error: %s\n", cudaGetErrorString(err)); // cudaDeviceSynchronize(); //2.88 - 0.19 } /** * 2. over: create Descriptor of two img * */ /** * 3. 
compute support point * */ __device__ \ uint32_t getAddressOffsetImage1(const int32_t& u, const int32_t& v, const int32_t& width) { return v*width + u; } __device__ \ unsigned int computeMatchEnergy1(unsigned char* dst1, unsigned char* dst2, int offset) { unsigned int a, b, c, e, r0, r4; a = abs(*(dst1 + offset) - *(dst2 + offset)) + abs(*(dst1 + offset + 1) - *(dst2 + offset + 1)); b = abs(*(dst1 + offset + 2) - *(dst2 + offset + 2)) + abs(*(dst1 + offset + 3) - *(dst2 + offset + 3)); c = abs(*(dst1 + offset + 4) - *(dst2 + offset + 4)) + abs(*(dst1 + offset + 5) - *(dst2 + offset + 5)); e = abs(*(dst1 + offset + 6) - *(dst2 + offset + 6)) + abs(*(dst1 + offset + 7) - *(dst2 + offset + 7)); r0 = a + b + c + e; a = abs(*(dst1 + offset + 8) - *(dst2 + offset + 8)) + abs(*(dst1 + offset + 9) - *(dst2 + offset + 9)); b = abs(*(dst1 + offset + 10) - *(dst2 + offset + 10)) + abs(*(dst1 + offset + 11) - *(dst2 + offset + 11)); c = abs(*(dst1 + offset + 12) - *(dst2 + offset + 12)) + abs(*(dst1 + offset + 13) - *(dst2 + offset + 13)); e = abs(*(dst1 + offset + 14) - *(dst2 + offset + 14)) + abs(*(dst1 + offset + 15) - *(dst2 + offset + 15)); r4 = a + b + c + e; return r0 + r4; } __device__ \ uint32_t getAddressOffsetGrid1(const int32_t& x, const int32_t& y, \ const int32_t& d, const int32_t& width, const int32_t& disp_num) { return (y*width + x)*disp_num + d; } __device__ \ void updatePosteriorMinimumNew(unsigned char* dst1, unsigned char* dst2, const int32_t &d, int32_t &val, int32_t &min_val, int32_t &min_d) { val = computeMatchEnergy1(dst1, dst2, 0); if (val<min_val) { min_val = val; min_d = d; } } __device__ \ void updatePosteriorMinimumNew1(unsigned char* dst1, unsigned char* dst2, const int32_t &d, const int8_t w, int32_t &val, int32_t &min_val, int32_t &min_d) { val = computeMatchEnergy1(dst1, dst2, 0) + w; if (val<min_val) { min_val = val; min_d = d; } } int iDivUp(int a, int b) { return ((a % b) != 0) ? 
(a / b + 1) : (a / b); } __device__ \ unsigned int computeMatchEnergy1_new(unsigned char* dst1_1, unsigned char* dst1_2, unsigned char* dst2_1, unsigned char* dst2_2, int32_t u, int32_t u_wrap) { unsigned int r0, r1, r2, r3; r0 = 0; r1 = 0; r2 = 0; r3 = 0; #pragma unroll for (int i = 0; i < 16; i++) { r0 += abs(dst2_1[(u_wrap << 4) - 32 + i] - dst1_1[(u << 4) - 32 + i]); r1 += abs(dst2_1[(u_wrap << 4) + 32 + i] - dst1_1[(u << 4) + 32 + i]); r2 += abs(dst2_2[(u_wrap << 4) - 32 + i] - dst1_2[(u << 4) - 32 + i]); r3 += abs(dst2_2[(u_wrap << 4) + 32 + i] - dst1_2[(u << 4) + 32 + i]); } return r0 + r1 + r2 + r3; } #define D_candidate_stepsize 5 #define INCON_THRESHOLD 5 #define INCON_MIN_SUPPORT 5 #define INCON_WINDOW_SIZE 5 #define SUPPORT_TEXTURE 10 #define DISP_MIN 0 #define DISP_MAX 63 #define SUPPORT_THRESHOLD 0.85 #define U_STEP 2 #define V_STEP 2 #define WINDOW_SIZE 3 #define MIN_1_E 32767 #define MIN_1_D -1 #define MIN_2_E 32767 #define MIN_2_D -1 #define DESC_OFFSET_1 (-16 * U_STEP) #define DESC_OFFSET_2 (+16 * U_STEP) #define DESC_OFFSET_3 (-16 * U_STEP) #define DESC_OFFSET_4 (+16 * U_STEP) #define BLOCKX 60 #define BLOCKY 1 #define GRIDX 1 #define GRIDY 46 //#define GRIDY 2 //dim3 threads(60, 1); //dim3 grid(1, 46); __constant__ uint32_t oneLine = WIDTH * 16; __global__ void compEner_gpu(uint8_t* I1_desc_shared, uint8_t* I2_desc_shared, int u, int u_wrap, uint32_t* sumResult) { int x = threadIdx.x; // x = (0,15) int32_t sum = 0; sum = abs(I1_desc_shared[(u - 2) << 4 + x ] - I2_desc_shared[(u_wrap - 2) << 4 + x]); sum += abs(I1_desc_shared[(u + 2) << 4 + x ] - I2_desc_shared[(u_wrap + 2) << 4 + x]); sum += abs(I1_desc_shared[(u + 2) << 4 + x + oneLine] - I2_desc_shared[(u_wrap + 2) << 4 + x +oneLine]); sum += abs(I1_desc_shared[(u - 2) << 4 + x + oneLine] - I2_desc_shared[(u_wrap - 2) << 4 + x +oneLine]); sumResult[x] = sum; } __global__ void sptMathKernel(int32_t D_can_width, int32_t D_can_height, int8_t* D_can, uint8_t* desc1, uint8_t* desc2) { int32_t u_wrap; int disp_max_valid; int result1 = 0, result2 = 0, result3 = 0, result4 = 0; int32_t line_offset; uint8_t *I1_line_addr, *I2_line_addr, *I1_block_addr, *I2_block_addr, *I_line_addr_tmp; uint8_t *I1_block_addr_1, *I1_block_addr_2, *I2_block_addr_1, *I2_block_addr_2; int32_t sum = 0; int16_t min_1_E; int16_t min_1_d; int16_t min_2_E; int16_t min_2_d; int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int u, v, d1 = -1 , d2 = -1; u = (x + 3) * D_candidate_stepsize; //5 v = (y + 1) * D_candidate_stepsize; line_offset = 16 * WIDTH*v; I1_line_addr = desc1 + line_offset; I2_line_addr = desc2 + line_offset; __shared__ uint8_t I1_desc_shared[320 * 16 * 2]; __shared__ uint8_t I2_desc_shared[320 * 16 * 2]; for(int i = 0; i < 85; i++){ I1_desc_shared[x + i * BLOCKX ] = *(I1_line_addr + x + i * BLOCKX - 2 * oneLine); I1_desc_shared[x + i * BLOCKX + oneLine] = *(I1_line_addr + x + i * BLOCKX + 2 * oneLine); I2_desc_shared[x + i * BLOCKX ] = *(I2_line_addr + x + i * BLOCKX - 2 * oneLine); I2_desc_shared[x + i * BLOCKX + oneLine] = *(I2_line_addr + x + i * BLOCKX + 2 * oneLine); } __syncthreads(); for (int32_t i=0; i<16; i++) sum += abs((int32_t)(*(I1_line_addr + 16 * u +i))-128); if (sum<10){ d1 = -1; return; } I1_block_addr_1 = I1_desc_shared + 16 * u ; I1_block_addr_2 = I1_desc_shared + 16 * u + oneLine ; disp_max_valid = min(63, u - 5); min_1_E = MIN_1_E; min_1_d = MIN_1_D; min_2_E = MIN_2_E; min_2_d = MIN_2_D; for (int16_t d = 0; d <= disp_max_valid; d++) { u_wrap = u - d; I2_block_addr_1 = 
I2_desc_shared + 16 * u_wrap; I2_block_addr_2 = I2_desc_shared + 16 * u_wrap + oneLine; // result1 = computeMatchEnergy1(I1_block_addr_1, I2_block_addr_1, DESC_OFFSET_1); // result2 = computeMatchEnergy1(I1_block_addr_1, I2_block_addr_1, DESC_OFFSET_2); // result3 = computeMatchEnergy1(I1_block_addr_2, I2_block_addr_2, DESC_OFFSET_3); result4 = computeMatchEnergy1(I1_block_addr_2, I2_block_addr_2, DESC_OFFSET_4); // sum = result1 + result2 + result3 + result4; sum = result4; if (sum<min_1_E) { min_2_E = min_1_E; min_2_d = min_1_d; min_1_E = sum; min_1_d = d; } else if (sum<min_2_E) { min_2_E = sum; min_2_d = d; } } if (min_1_d>=0 && min_2_d>=0 && (float)min_1_E < 0.85*(float)min_2_E) d1 = min_1_d; sum = 0; if (d1 >= 0){ min_1_E = MIN_1_E; min_1_d = MIN_1_D; min_2_E = MIN_2_E; min_2_d = MIN_2_D; u = u - d1; disp_max_valid = min(63, WIDTH - u - 5); I2_block_addr_1 = I2_desc_shared + 16 * u; I2_block_addr_2 = I2_desc_shared + 16 * u + 320 * 16; sum = 0; #pragma unroll for (int32_t i=0; i<16; i++) sum += abs((int32_t)(*(I2_line_addr+i))-128); if (sum<10){ return; } sum = 0; for(int16_t d = 0; d <= disp_max_valid; d++){ u_wrap = u + d; I1_block_addr_1 = I1_desc_shared + 16 * u_wrap; I1_block_addr_2 = I1_desc_shared + 16 * u_wrap + 320 * 16; // result1 = computeMatchEnergy1(I1_block_addr_1, I2_block_addr_1, DESC_OFFSET_1); // result2 = computeMatchEnergy1(I1_block_addr_1, I2_block_addr_1, DESC_OFFSET_2); // result3 = computeMatchEnergy1(I1_block_addr_2, I2_block_addr_2, DESC_OFFSET_3); result4 = computeMatchEnergy1(I1_block_addr_2, I2_block_addr_2, DESC_OFFSET_4); // sum = result1 + result2 + result3 + result4; sum = result4; // sum = computeMatchEnergy1_new(I2_desc_shared, I2_desc_shared + oneLine, I1_desc_shared, I1_desc_shared + oneLine, u, u_wrap); if (sum<min_1_E) { min_2_E = min_1_E; min_2_d = min_1_d; min_1_E = sum; min_1_d = d; } else if (sum<min_2_E) { min_2_E = sum; min_2_d = d; } } if (min_1_d>=0 && min_2_d>=0 && (float)min_1_E < 0.85*(float)min_2_E) d2 = min_1_d; if( d2 >= 0 && abs(d2 - d1) <= 2 ) D_can[x + y * D_can_width] = d1; } } void addCornerSupportPoints(vector<Elas::support_pt> &p_support, int32_t width, int32_t height) { // list of border points vector<Elas::support_pt> p_border; p_border.push_back(Elas::support_pt(0, 0, 0)); p_border.push_back(Elas::support_pt(0, height - 1, 0)); p_border.push_back(Elas::support_pt(width - 1, 0, 0)); p_border.push_back(Elas::support_pt(width - 1, height - 1, 0)); // find closest d for (int32_t i = 0; i<p_border.size(); i++) { int32_t best_dist = 10000000; for (int32_t j = 0; j<p_support.size(); j++) { int32_t du = p_border[i].u - p_support[j].u; int32_t dv = p_border[i].v - p_support[j].v; int32_t curr_dist = du*du + dv*dv; if (curr_dist<best_dist) { best_dist = curr_dist; p_border[i].d = p_support[j].d; } } } // for right image p_border.push_back(Elas::support_pt(p_border[2].u + p_border[2].d, p_border[2].v, p_border[2].d)); p_border.push_back(Elas::support_pt(p_border[3].u + p_border[3].d, p_border[3].v, p_border[3].d)); // add border points to support points for (int32_t i = 0; i<p_border.size(); i++) p_support.push_back(p_border[i]); } __global__ void removeInconsistentSupportPoints1(int16_t* D_can, int32_t D_can_width, int32_t D_can_height) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; int u, v; if (x < D_can_width && y < D_can_height) { int16_t d_can = *(D_can + getAddressOffsetImage1(x, y, D_can_width)); if (d_can >= 0) { int32_t support = 0; for (int32_t u_can_2 = x - 
INCON_WINDOW_SIZE; u_can_2 <= x + INCON_WINDOW_SIZE; u_can_2++) { for (int32_t v_can_2 = y - INCON_WINDOW_SIZE; v_can_2 <= y + INCON_WINDOW_SIZE; v_can_2++) { if (u_can_2 >= 0 && v_can_2 >= 0 && u_can_2<D_can_width && v_can_2<D_can_height) { int16_t d_can_2 = *(D_can + getAddressOffsetImage1(u_can_2, v_can_2, D_can_width)); if (d_can_2 >= 0 && abs(d_can - d_can_2) <= INCON_THRESHOLD) support++; } } } // invalidate support point if number of supporting points is too low if (support<INCON_MIN_SUPPORT) *(D_can + getAddressOffsetImage1(x, y, D_can_width)) = -1; } } } __global__ void removeRedundantSupportPoints1(int16_t* D_can, int32_t D_can_width, int32_t D_can_height, int32_t redun_max_dist, int32_t redun_threshold, bool vertical) { int x = blockDim.x * blockIdx.x + threadIdx.x; int y = blockDim.y * blockIdx.y + threadIdx.y; if (x < D_can_width && y < D_can_height) { // parameters int32_t redun_dir_u[2] = { 0,0 }; int32_t redun_dir_v[2] = { 0,0 }; if (vertical) { redun_dir_v[0] = -1; redun_dir_v[1] = +1; } else { redun_dir_u[0] = -1; redun_dir_u[1] = +1; } int16_t d_can = *(D_can + getAddressOffsetImage1(x, y, D_can_width)); if (d_can >= 0) { // check all directions for redundancy bool redundant = true; for (int32_t i = 0; i<2; i++) { // search for support int32_t u_can_2 = x; int32_t v_can_2 = y; int16_t d_can_2; bool support = false; for (int32_t j = 0; j<redun_max_dist; j++) { u_can_2 += redun_dir_u[i]; v_can_2 += redun_dir_v[i]; if (u_can_2<0 || v_can_2<0 || u_can_2 >= D_can_width || v_can_2 >= D_can_height) break; d_can_2 = *(D_can + getAddressOffsetImage1(u_can_2, v_can_2, D_can_width)); if (d_can_2 >= 0 && abs(d_can - d_can_2) <= redun_threshold) { support = true; break; } } // if we have no support => point is not redundant if (!support) { redundant = false; break; } } // invalidate support point if it is redundant if (redundant) *(D_can + getAddressOffsetImage1(x, y, D_can_width)) = -1; } } } vector<Elas::support_pt> computeSupportMatches_g(uint8_t* I_desc1, uint8_t* I_desc2, \ int8_t* D_sup_c, int8_t* D_sup_g) { // create matrix for saving disparity candidates int32_t D_can_width = 60; //[15,310] => 60 int32_t D_can_height = 48; //[5, 230] => 46 gpuErrchk(cudaMemset(D_sup_g, -1, D_can_width*D_can_height * sizeof(int8_t))); dim3 threads(BLOCKX, BLOCKY); dim3 grid(GRIDX, GRIDY); gpuErrchk(cudaFuncSetCacheConfig(sptMathKernel,cudaFuncCachePreferShared)); //compute support // cudaDeviceSynchronize(); // sptMathKernel << <grid, threads, 0 >> > (D_can_width, D_can_height, D_sup_g, I_desc1, I_desc2); //cudaDeviceSynchronize(); //13ms gpuErrchk(cudaDeviceSynchronize()); //put D_sup to vector of support vector<Elas::support_pt> p_support; for (int32_t v_can = 0; v_can<D_can_height; v_can++) for (int32_t u_can = 0; u_can<D_can_width; u_can++) if (*(D_sup_c + u_can + v_can * D_can_width) >= 0) p_support.push_back(Elas::support_pt((u_can + 3)*D_candidate_stepsize, (v_can + 1) * D_candidate_stepsize, *(D_sup_c+ u_can + v_can * D_can_width))); return p_support; } /** * 3. over: compute support point * */ /** * 4. convert dis to cloud ***/ __global__ void Convert(float *D_g, float *cloud_g) { int u = blockDim.x * blockIdx.x + threadIdx.x; int v = blockDim.y * blockIdx.y + threadIdx.y; v += 20; float w = 0, x = 0, y = 0, z = 0; if(u + v * WIDTH > 320 * 240) printf("\n+++++++++++++ id3 out %d\n", u + v * WIDTH); float dis = D_g[u + v * WIDTH]; w = 0.006669723997311648 * dis; if(w == 0 ){ // printf("bug. 
w == 0\n"); w = 0.00000001; } x = (float)((u - 161.2100334167481) / w); y = (float)(- (v - 119.9240913391113) / w); //has bug z = (float)(241.57918 / w); *(cloud_g + (u + v * WIDTH) * 3) = x; *(cloud_g + (u + v * WIDTH) * 3 + 1) = y; *(cloud_g + (u + v * WIDTH) * 3 + 2) = z; if(dis == -1) { *(cloud_g + (u + v * WIDTH) * 3) = 10000; *(cloud_g + (u + v * WIDTH) * 3 + 1) = 10000; *(cloud_g + (u + v * WIDTH) * 3 + 2) = 10000; } // *(cloud_g + (u + v * WIDTH) * 3) = z; // *(cloud_g + (u + v * WIDTH) * 3 + 1) = y; // *(cloud_g + (u + v * WIDTH) * 3 + 2) = x; // A = [u, v, d,1]; // Q = [1, 0, 0, -161.2100334167481; 0, 1, 0, -119.9240913391113; // 0, 0, 0, 241.57918; 0, 0, 0.006669723997311648, 0] } int ConvertD2Z(float* D1_g, float* cloud_g) { dim3 threads(320, 1); dim3 grid(1, 200); // printf("conv\n"); Convert<<<grid, threads>>>(D1_g, cloud_g); // printf("conv2\n"); // cudaDeviceSynchronize(); } /** * 4. over: convert dis to cloud ***/ /**** * 5. cuda_computeD ***/ int tri_size = 0; __constant__ int32_t grid_dims_g[3] = {65, 16, 12} ; __constant__ int8_t temp[] = {-14,-9,-2,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0}; __global__ void Triangle_Match1(Elas::triangle* tri, int32_t* disparity_grid,\ uint8_t* I1_desc, uint8_t* I2_desc, int8_t* P, \ int32_t plane_radius, bool right_image, float* D, \ uint32_t* tp, int tri_size) { float plane_a = 0, plane_b = 0, plane_c = 0, plane_d = 0; int u = blockDim.x * blockIdx.x + threadIdx.x; int v = blockDim.y * blockIdx.y + threadIdx.y ; uint32_t id; __shared__ uint8_t __I1_desc_share[320 * 16]; __shared__ uint8_t __I2_desc_share[320 * 16]; // if(u == 245 && v == 5){ // printf("aa:\n"); // } if(u >= 320) printf("\n+++++++++++++ u out %d\n", u); if(v >= 240) printf("\n+++++++++++++ v out %d\n", v); for(int i = 0; i < 16; i += 1 ) { __I1_desc_share[u + i*320] = I1_desc[v * 320*16 + u + i*320]; __I2_desc_share[u + i*320] = I2_desc[v * 320*16 + u + i*320]; } __syncthreads(); id = tp[u + v * WIDTH]; if(u + v * WIDTH > 320 * 240) printf("\n+++++++++++++ id1 out %d\n", u + v * WIDTH); if(id > tri_size) printf("\n+++++++++++++ id2 out %d,%d\n", id, tri_size); plane_a = tri[id].t1a; plane_b = tri[id].t1b; plane_c = tri[id].t1c; plane_d = tri[id].t2a; bool valid = fabs(plane_a)<0.7 && fabs(plane_d)<0.7; const int32_t window_size = 2; // address of disparity we want to compute uint32_t d_addr; d_addr = getAddressOffsetImage1(u, v, WIDTH); if(d_addr > 320 * 240) printf("+++++++++d_addr out %d\n", d_addr); uint8_t *I1_line_addr, *I2_line_addr; I2_line_addr = __I2_desc_share ; uint8_t* I1_block_addr = __I1_desc_share + 16 * u; // does this patch have enough texture? 
int32_t sum = 0; //int32_t match_texture = 1; // //#pragma unroll // for (int32_t i = 0; i<16; i++) // sum += abs((int32_t)(*(I1_block_addr + i)) - 127); // if (sum<match_texture) // return; // compute disparity, min disparity and max disparity of plane prior int32_t d_plane = (int32_t)(plane_a*(float)u + plane_b*(float)v + plane_c); // int32_t d_plane = (int32_t)(0); int32_t d_plane_min = max(d_plane - plane_radius, 0); int32_t d_plane_max = min(d_plane + plane_radius, grid_dims_g[0] - 2); // get grid pointer int32_t grid_x = (int32_t)floor((float)u / (float)GRID_SIZE); int32_t grid_y = (int32_t)floor((float)v / (float)GRID_SIZE); //(gird_y * 16 + grid_x) * 65 // uint32_t grid_addr = (grid_y * grid_dims_g[1] + grid_x ) * grid_dims_g[0] + 0; //getAddressOffsetGrid1(grid_x, grid_y, 0, grid_dims_g[1], grid_dims_g[0]); uint32_t grid_addr = getAddressOffsetGrid1(grid_x, grid_y, 0, grid_dims_g[1], grid_dims_g[0]); // uint32_t grid_addr = (grid_y * 16 + grid_x ) * 65 + 0; //getAddressOffsetGrid1(grid_x, grid_y, 0, grid_dims_g[1], grid_dims_g[0]); if( (grid_addr + 1) > 65 * 12 * 16){ printf("++++++++++ grid_addr out %d, %d, %d, %d\n", grid_x, grid_y, grid_dims_g[1], grid_dims_g[0]); } int32_t num_grid = *(disparity_grid + grid_addr); if( num_grid > 64 ) printf("++++++++++ num_grid out %d\n", num_grid); int32_t* d_grid = disparity_grid + grid_addr + 1; // loop variables int32_t d_curr, u_warp, val; int32_t min_val = 10000; int32_t min_d = -1; // if(u == 245 && v == 5){ // printf("aa:%d, %d, %d\n", num_grid, d_plane_min, d_plane_max); // } // left image if (!right_image) { for (int32_t i = 0; i<num_grid; i++) { d_curr = d_grid[i]; // printf("u=%d, %d\n", u, d_curr); if (d_curr<d_plane_min || d_curr>d_plane_max) { u_warp = u - d_curr; if (u_warp<window_size || u_warp >= WIDTH - window_size) continue; if(u_warp < 0 || u_warp > 320) printf("_+++++++++ u_wrap1 out %d\n", u_warp); updatePosteriorMinimumNew(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, val, min_val, min_d); } } // if(v > 0 && v < 6 && u >0 && u < 20) // printf("2u=%d, %d, %d, %d, %d\n", u, v, d_curr, d_plane_min, d_plane_max); // int tmp; // for (tmp = d_plane_min; tmp <= d_plane_max; tmp++) { // tmp++; // updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * tmp, tmp, valid ? *(temp + abs(tmp - d_plane)) : 0, val, min_val, min_d); // } for (d_curr = d_plane_min; d_curr <= d_plane_max; d_curr++) { u_warp = u - d_curr; // if(v > 0 && v < 6 && u >0 && u < 20) // printf("1u=%d, %d, %d, %d, %d\n", u, v, d_curr, d_plane_min, d_plane_max); if (u_warp<window_size || u_warp >= WIDTH - window_size) continue; if(u_warp < 0 || u_warp > 320) printf("_+++++++++ u_wrap2 out %d\n", u_warp); updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, valid ? 
*(temp + abs(d_curr - d_plane)) : 0, val, min_val, min_d); } } else { #pragma unroll for (int32_t i = 0; i<num_grid; i++) { d_curr = d_grid[i]; if (d_curr<d_plane_min || d_curr>d_plane_max) { u_warp = u + d_curr; if(u_warp < 0 || u_warp > 320){ // printf("_+++++++++ u_wrap3 out %d\n", u_warp); continue; } if (u_warp<window_size || u_warp >= WIDTH - window_size) continue; updatePosteriorMinimumNew(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, val, min_val, min_d); } } #pragma unroll for (d_curr = d_plane_min; d_curr < (d_plane_max + 1); d_curr++) { u_warp = u + d_curr; if(u_warp < 0 || u_warp > 320){ // printf("_+++++++++ u_wrap4 out %d,%d,%d,%d,%d\n", u, d_curr, u_warp, d_plane_min, d_plane_max); continue; } if (u_warp<window_size || u_warp >= WIDTH - window_size){ // printf("_+++++++++ u_wrap5 out\n"); continue; } updatePosteriorMinimumNew1(I1_block_addr, I2_line_addr + 16 * u_warp, d_curr, valid ? *(temp + abs(d_curr - d_plane)) : 0, val, min_val, min_d); } } // set disparity value // if (min_d >= 0){ if (min_d > 0){ *(D + d_addr) = (float)min_d; // MAP value (min neg-Log probability) }else *(D + d_addr) = -1; // invalid disparity } void computeTrianglePoints(const vector<Elas::support_pt> &p_support, const vector<Elas::triangle> &tri, \ bool right_image, int32_t width, int32_t TRI_SIZE, uint32_t* tp) { // loop variables int32_t c1, c2, c3; // float plane_a, plane_b, plane_c, plane_d; memset(tp, 0, WIDTH * HEIGH* sizeof(uint32_t)); // for all triangles do for (uint32_t i = 0; i<TRI_SIZE; i++) { int num = 0; // get plane parameters uint32_t p_i = i * 3; // triangle corners c1 = tri[i].c1; c2 = tri[i].c2; c3 = tri[i].c3; // sort triangle corners wrt. u (ascending) float tri_u[3]; if (!right_image) { //左图像 tri_u[0] = p_support[c1].u; tri_u[1] = p_support[c2].u; tri_u[2] = p_support[c3].u; } else { //右图像 tri_u[0] = p_support[c1].u - p_support[c1].d; tri_u[1] = p_support[c2].u - p_support[c2].d; tri_u[2] = p_support[c3].u - p_support[c3].d; } float tri_v[3] = { p_support[c1].v,p_support[c2].v,p_support[c3].v }; for (uint32_t j = 0; j<3; j++) { for (uint32_t k = 0; k<j; k++) { if (tri_u[k]>tri_u[j]) { float tri_u_temp = tri_u[j]; tri_u[j] = tri_u[k]; tri_u[k] = tri_u_temp; float tri_v_temp = tri_v[j]; tri_v[j] = tri_v[k]; tri_v[k] = tri_v_temp; } } } // rename corners float A_u = tri_u[0]; float A_v = tri_v[0]; float B_u = tri_u[1]; float B_v = tri_v[1]; float C_u = tri_u[2]; float C_v = tri_v[2]; // compute straight lines connecting triangle corners float AB_a = 0; float AC_a = 0; float BC_a = 0; if ((int32_t)(A_u) != (int32_t)(B_u)) AB_a = (A_v - B_v) / (A_u - B_u); if ((int32_t)(A_u) != (int32_t)(C_u)) AC_a = (A_v - C_v) / (A_u - C_u); if ((int32_t)(B_u) != (int32_t)(C_u)) BC_a = (B_v - C_v) / (B_u - C_u); float AB_b = A_v - AB_a*A_u; float AC_b = A_v - AC_a*A_u; float BC_b = B_v - BC_a*B_u; // first part (triangle corner A->B) if ((int32_t)(A_u) != (int32_t)(B_u)) { for (int32_t u = max((int32_t)A_u, 0); u < min((int32_t)B_u, width); u++) { int32_t v_1 = (uint32_t)(AC_a*(float)u + AC_b); int32_t v_2 = (uint32_t)(AB_a*(float)u + AB_b); for (int32_t v = min(v_1, v_2); v < max(v_1, v_2); v++){ // *((int16_t*)(tp + 2 * u + v * 2 * width)) = u; // *((int16_t*)(tp + 2 * u + v * 2 * width) + 1) = v; // *(tp + 2 * u + v * 2 * width + 1) = i; if(u + v * width > 320 * 240) { printf("hhh\n"); while(1); } *(tp + u + v * width ) = i; // num++; } } } // second part (triangle corner B->C) if ((int32_t)(B_u) != (int32_t)(C_u)) { for (int32_t u = max((int32_t)B_u, 0); u < min((int32_t)C_u, width); 
u++) { if (!param.subsampling || u % 2 == 0) { int32_t v_1 = (uint32_t)(AC_a*(float)u + AC_b); int32_t v_2 = (uint32_t)(BC_a*(float)u + BC_b); for (int32_t v = min(v_1, v_2); v < max(v_1, v_2); v++) if (!param.subsampling || v % 2 == 0) { // *((int16_t*)(tp + 2 * u + v * 2 * width)) = u; // *((int16_t*)(tp + 2 * u + v * 2 * width) + 1) = v; // *(tp + 2 * u + v * 2 * width + 1) = i; if(u + v * width > 320 * 240) { printf("hhh2\n"); while(1); } *(tp + u + v * width) = i; // num++; } } } } // tri[i].pointNum = num; } } cudaError_t err; int32_t dims[3] = {WIDTH, HEIGH, WIDTH}; void cuda_computeD(int32_t* disparity_grid_1, int32_t* disparity_grid_2, vector<Elas::support_pt> &p_support, \ vector<Elas::triangle> &tri_1, vector<Elas::triangle> &tri_2, \ float* D1, float* D2, uint8_t* I1, uint8_t* I2, int8_t* P_g,\ uint32_t *tp1_g, uint32_t* tp2_g, uint32_t* tp1_c, uint32_t* tp2_c, float* cloud_g) { int32_t width, height, bpl; clock_t t1, t2; // get width, height and bytes per line width = dims[0]; // height = dims[1]; bpl = dims[2]; // // allocate memory for disparity grid int32_t grid_width = 16; //(int32_t)ceil((float)width / (float)20); int32_t grid_height = 12; //(int32_t)ceil((float)height / (float)20); int32_t grid_dims[3] = { 63 + 2,grid_width,grid_height }; int32_t P_SUPPORT_SIZE = p_support.size(); int32_t TRI_SIZE1 = tri_1.size(); int32_t TRI_SIZE2 = tri_2.size(); tri_size = TRI_SIZE1; // int8_t* tp1_cpu = tp1_c; // int8_t* tp2_cpu = tp2_c; // int8_t *tp1_gpu = tp1_g; // int8_t *tp2_gpu = tp2_g; computeTrianglePoints(p_support, tri_1, 0, width, TRI_SIZE1, tp1_c); computeTrianglePoints(p_support, tri_2, 1, width, TRI_SIZE2, tp2_c); Elas::triangle* tri_gpu_1, *tri_gpu_2; //int32_t *P_gpu = NULL; //action:::: cannot delete; cudaMalloc((void **)&tri_gpu_1, sizeof(Elas::triangle) * TRI_SIZE1); cudaMalloc((void **)&tri_gpu_2, sizeof(Elas::triangle) * TRI_SIZE2); // cudaMalloc((void **)&P_gpu, sizeof(int8_t) * 64); err = cudaGetLastError(); if(0 != err) printf("cuda error: %s\n", cudaGetErrorString(err)); // printf("copy\n"); cudaMemcpy(tri_gpu_1, &tri_1[0], sizeof(Elas::triangle) * TRI_SIZE1, cudaMemcpyHostToDevice); cudaMemcpy(tri_gpu_2, &tri_2[0], sizeof(Elas::triangle) * TRI_SIZE2, cudaMemcpyHostToDevice); //cudaMemcpy(P_gpu, P_g, sizeof(int8_t) * 64, cudaMemcpyHostToDevice); err = cudaGetLastError(); if(0 != err) printf("cuda error: %s\n", cudaGetErrorString(err)); int32_t plane_radius = 2; //(int32_t)max((float)ceil(param.sigma*param.sradius), (float)2.0); // dim3 threads(280, 1); dim3 threads(320, 1); dim3 grid(1, 240); // cudaDeviceSynchronize(); err = cudaGetLastError(); if(0 != err) printf("cuda error: %s\n", cudaGetErrorString(err)); // printf("going Triangle_match kernel\n"); // printf("tri_size1,2 = %d, %d\n", TRI_SIZE1, TRI_SIZE2); Triangle_Match1 << <grid, threads, 0>> > (tri_gpu_1, disparity_grid_1, \ I1, I2, P_g, plane_radius, 0, D1, tp1_g, tri_size); tri_size = TRI_SIZE2; Triangle_Match1 << <grid, threads, 0>> > (tri_gpu_2, disparity_grid_2, \ I2, I1, P_g, plane_radius, 1, D2, tp2_g, tri_size); err = cudaGetLastError(); if(0 != err) printf("Triangle_Match1 cuda error: %s\n", cudaGetErrorString(err)); // gpuErrchk(cudaDeviceSynchronize()); //if(0 != err) printf("Triangle_Match1 cuda error: %s\n", cudaGetErrorString(err)); // printf("leftRightConsistency\n"); leftRightConsistencyCheck<<<grid, threads, 0>>>(D1, D2); // cudaDeviceSynchronize(); dim3 threads2(320, 1); dim3 grid2(1, 200); Convert<<<grid2, threads2>>>(D1, cloud_g); gpuErrchk(err); cudaFree((void*)tri_gpu_1); 
cudaFree((void*)tri_gpu_2); // cudaFree((void*)P_gpu); err =cudaDeviceSynchronize(); gpuErrchk(err); } /**** * 5. over: cuda_computeD ***/ /*** * 6. leftRightConsistencyCheck * */ __global__ void leftRightConsistencyCheck(float* D1_g, float* D2_g) { int u = blockDim.x * blockIdx.x + threadIdx.x; int v = blockDim.y * blockIdx.y + threadIdx.y; // __shared__ float I_du_share[320]; // __shared__ float I_dv_share[320]; uint32_t addr = v * WIDTH + u; float d1 = *(D1_g + addr); float d2 = *(D2_g + addr); float u_warp_1 = u - d1; float u_warp_2 = u + d2; if(d1 >= 0 && u_warp_1 >= 0 && u_warp_1 < WIDTH) { uint32_t addr_warp = v * WIDTH + (int32_t)u_warp_1; if(fabs(*(D2_g + addr_warp) - d1) > 2 ) //|| (*(D2_g + addr_warp) - d1) < -2) *(D1_g + addr) = -1; }else *(D1_g + addr) = -1; } /*** * 6. leftRightConsistencyCheck * */
36e298a079e5170a8ff255c629e8ba93be510fba.hip
// !!! This is a file automatically generated by hipify!!!
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// Copyright (c) 2018-2023 www.open3d.org
// SPDX-License-Identifier: MIT
// ----------------------------------------------------------------------------

#include <hip/hip_runtime.h>
#include <hip/hip_runtime.h>

#include "open3d/t/pipelines/kernel/TransformationConverterImpl.h"

namespace open3d {
namespace t {
namespace pipelines {
namespace kernel {

template <typename scalar_t>
__global__ void PoseToTransformationKernel(scalar_t *transformation_ptr,
                                           const scalar_t *X_ptr) {
    PoseToTransformationImpl(transformation_ptr, X_ptr);
}

template <typename scalar_t>
void PoseToTransformationCUDA(scalar_t *transformation_ptr,
                              const scalar_t *X_ptr) {
    utility::LogError("Unsupported data type.");
}

template <>
void PoseToTransformationCUDA<float>(float *transformation_ptr,
                                     const float *X_ptr) {
    hipLaunchKernelGGL(( PoseToTransformationKernel<float>)
        , dim3(1), dim3(1), 0, core::cuda::GetStream(), transformation_ptr, X_ptr);
}

template <>
void PoseToTransformationCUDA<double>(double *transformation_ptr,
                                      const double *X_ptr) {
    hipLaunchKernelGGL(( PoseToTransformationKernel<double>)
        , dim3(1), dim3(1), 0, core::cuda::GetStream(), transformation_ptr, X_ptr);
}

}  // namespace kernel
}  // namespace pipelines
}  // namespace t
}  // namespace open3d
36e298a079e5170a8ff255c629e8ba93be510fba.cu
// ----------------------------------------------------------------------------
// - Open3D: www.open3d.org -
// ----------------------------------------------------------------------------
// Copyright (c) 2018-2023 www.open3d.org
// SPDX-License-Identifier: MIT
// ----------------------------------------------------------------------------

#include <cuda.h>
#include <cuda_runtime.h>

#include "open3d/t/pipelines/kernel/TransformationConverterImpl.h"

namespace open3d {
namespace t {
namespace pipelines {
namespace kernel {

template <typename scalar_t>
__global__ void PoseToTransformationKernel(scalar_t *transformation_ptr,
                                           const scalar_t *X_ptr) {
    PoseToTransformationImpl(transformation_ptr, X_ptr);
}

template <typename scalar_t>
void PoseToTransformationCUDA(scalar_t *transformation_ptr,
                              const scalar_t *X_ptr) {
    utility::LogError("Unsupported data type.");
}

template <>
void PoseToTransformationCUDA<float>(float *transformation_ptr,
                                     const float *X_ptr) {
    PoseToTransformationKernel<float>
        <<<1, 1, 0, core::cuda::GetStream()>>>(transformation_ptr, X_ptr);
}

template <>
void PoseToTransformationCUDA<double>(double *transformation_ptr,
                                      const double *X_ptr) {
    PoseToTransformationKernel<double>
        <<<1, 1, 0, core::cuda::GetStream()>>>(transformation_ptr, X_ptr);
}

}  // namespace kernel
}  // namespace pipelines
}  // namespace t
}  // namespace open3d
549f711ce54ef50d90da212728e92cfcea9cca19.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "includes.h"

__global__ void cudaSinv_kernel(unsigned int size, const float *x, float *y)
{
    const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x;
    const unsigned int stride = blockDim.x * gridDim.x;
    for (unsigned int i = index; i < size; i += stride) {
        y[i] = 1.0f / x[i];
    }
}
549f711ce54ef50d90da212728e92cfcea9cca19.cu
#include "includes.h" __global__ void cudaSinv_kernel(unsigned int size, const float *x, float *y) { const unsigned int index = blockIdx.x * blockDim.x + threadIdx.x; const unsigned int stride = blockDim.x * gridDim.x; for (unsigned int i = index; i < size; i += stride) { y[i] = 1.0f / x[i]; } }
f05b5b747c6843d15d5bf4514f2043bf279b827e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_2_bot; int xdim0_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_2_bot; int ydim0_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_2_bot; int xdim1_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_2_bot; int ydim1_update_halo_kernel2_zvel_plus_2_bot_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_plus_2_bot*(y)+xdim0_update_halo_kernel2_zvel_plus_2_bot*ydim0_update_halo_kernel2_zvel_plus_2_bot*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_plus_2_bot*(y)+xdim1_update_halo_kernel2_zvel_plus_2_bot*ydim1_update_halo_kernel2_zvel_plus_2_bot*(z)) //user function __device__ inline void update_halo_kernel2_zvel_plus_2_bot(double *zvel0, double *zvel1, const int* fields) { if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = zvel0[OPS_ACC0(0,2,0)]; if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = zvel1[OPS_ACC1(0,2,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_2_bot( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel2_zvel_plus_2_bot + idx_z * 1 * xdim0_update_halo_kernel2_zvel_plus_2_bot * ydim0_update_halo_kernel2_zvel_plus_2_bot; arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel2_zvel_plus_2_bot + idx_z * 1 * xdim1_update_halo_kernel2_zvel_plus_2_bot * ydim1_update_halo_kernel2_zvel_plus_2_bot; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_2_bot(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_plus_2_bot(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_arg args[3] = { arg0, arg1, arg2}; ops_timing_realloc(78,"update_halo_kernel2_zvel_plus_2_bot"); OPS_kernels[78].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_2_bot_h || ydim0 != 
ydim0_update_halo_kernel2_zvel_plus_2_bot_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_2_bot_h || ydim1 != ydim1_update_halo_kernel2_zvel_plus_2_bot_h) { hipMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_plus_2_bot, &xdim0, sizeof(int) ); xdim0_update_halo_kernel2_zvel_plus_2_bot_h = xdim0; hipMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_plus_2_bot, &ydim0, sizeof(int) ); ydim0_update_halo_kernel2_zvel_plus_2_bot_h = ydim0; hipMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_plus_2_bot, &xdim1, sizeof(int) ); xdim1_update_halo_kernel2_zvel_plus_2_bot_h = xdim1; hipMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_plus_2_bot, &ydim1, sizeof(int) ); ydim1_update_halo_kernel2_zvel_plus_2_bot_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); ops_timers_core(&c1,&t1); OPS_kernels[78].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data hipLaunchKernelGGL(( ops_update_halo_kernel2_zvel_plus_2_bot), dim3(grid), dim3(tblock) , 0, 0, (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(hipDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[78].time += t2-t1; ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[78].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[78].transfer += ops_compute_transfer(dim, range, &arg1); }
f05b5b747c6843d15d5bf4514f2043bf279b827e.cu
// // auto-generated by ops.py // __constant__ int xdim0_update_halo_kernel2_zvel_plus_2_bot; int xdim0_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int ydim0_update_halo_kernel2_zvel_plus_2_bot; int ydim0_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int xdim1_update_halo_kernel2_zvel_plus_2_bot; int xdim1_update_halo_kernel2_zvel_plus_2_bot_h = -1; __constant__ int ydim1_update_halo_kernel2_zvel_plus_2_bot; int ydim1_update_halo_kernel2_zvel_plus_2_bot_h = -1; #define OPS_ACC0(x,y,z) (x+xdim0_update_halo_kernel2_zvel_plus_2_bot*(y)+xdim0_update_halo_kernel2_zvel_plus_2_bot*ydim0_update_halo_kernel2_zvel_plus_2_bot*(z)) #define OPS_ACC1(x,y,z) (x+xdim1_update_halo_kernel2_zvel_plus_2_bot*(y)+xdim1_update_halo_kernel2_zvel_plus_2_bot*ydim1_update_halo_kernel2_zvel_plus_2_bot*(z)) //user function __device__ inline void update_halo_kernel2_zvel_plus_2_bot(double *zvel0, double *zvel1, const int* fields) { if(fields[FIELD_ZVEL0] == 1) zvel0[OPS_ACC0(0,0,0)] = zvel0[OPS_ACC0(0,2,0)]; if(fields[FIELD_ZVEL1] == 1) zvel1[OPS_ACC1(0,0,0)] = zvel1[OPS_ACC1(0,2,0)]; } #undef OPS_ACC0 #undef OPS_ACC1 __global__ void ops_update_halo_kernel2_zvel_plus_2_bot( double* __restrict arg0, double* __restrict arg1, const int* __restrict arg2, int size0, int size1, int size2 ){ int idx_z = blockDim.z * blockIdx.z + threadIdx.z; int idx_y = blockDim.y * blockIdx.y + threadIdx.y; int idx_x = blockDim.x * blockIdx.x + threadIdx.x; arg0 += idx_x * 1 + idx_y * 1 * xdim0_update_halo_kernel2_zvel_plus_2_bot + idx_z * 1 * xdim0_update_halo_kernel2_zvel_plus_2_bot * ydim0_update_halo_kernel2_zvel_plus_2_bot; arg1 += idx_x * 1 + idx_y * 1 * xdim1_update_halo_kernel2_zvel_plus_2_bot + idx_z * 1 * xdim1_update_halo_kernel2_zvel_plus_2_bot * ydim1_update_halo_kernel2_zvel_plus_2_bot; if (idx_x < size0 && idx_y < size1 && idx_z < size2) { update_halo_kernel2_zvel_plus_2_bot(arg0, arg1, arg2); } } // host stub function void ops_par_loop_update_halo_kernel2_zvel_plus_2_bot(char const *name, ops_block block, int dim, int* range, ops_arg arg0, ops_arg arg1, ops_arg arg2) { ops_arg args[3] = { arg0, arg1, arg2}; ops_timing_realloc(78,"update_halo_kernel2_zvel_plus_2_bot"); OPS_kernels[78].count++; //compute locally allocated range for the sub-block int start[3]; int end[3]; #ifdef OPS_MPI sub_block_list sb = OPS_sub_block_list[block->index]; if (!sb->owned) return; for ( int n=0; n<3; n++ ){ start[n] = sb->decomp_disp[n];end[n] = sb->decomp_disp[n]+sb->decomp_size[n]; if (start[n] >= range[2*n]) { start[n] = 0; } else { start[n] = range[2*n] - start[n]; } if (sb->id_m[n]==MPI_PROC_NULL && range[2*n] < 0) start[n] = range[2*n]; if (end[n] >= range[2*n+1]) { end[n] = range[2*n+1] - sb->decomp_disp[n]; } else { end[n] = sb->decomp_size[n]; } if (sb->id_p[n]==MPI_PROC_NULL && (range[2*n+1] > sb->decomp_disp[n]+sb->decomp_size[n])) end[n] += (range[2*n+1]-sb->decomp_disp[n]-sb->decomp_size[n]); } #else //OPS_MPI for ( int n=0; n<3; n++ ){ start[n] = range[2*n];end[n] = range[2*n+1]; } #endif //OPS_MPI int x_size = MAX(0,end[0]-start[0]); int y_size = MAX(0,end[1]-start[1]); int z_size = MAX(0,end[2]-start[2]); int xdim0 = args[0].dat->size[0]*args[0].dat->dim; int ydim0 = args[0].dat->size[1]; int xdim1 = args[1].dat->size[0]*args[1].dat->dim; int ydim1 = args[1].dat->size[1]; //Timing double t1,t2,c1,c2; ops_timers_core(&c2,&t2); if (xdim0 != xdim0_update_halo_kernel2_zvel_plus_2_bot_h || ydim0 != ydim0_update_halo_kernel2_zvel_plus_2_bot_h || xdim1 != xdim1_update_halo_kernel2_zvel_plus_2_bot_h || ydim1 != 
ydim1_update_halo_kernel2_zvel_plus_2_bot_h) { cudaMemcpyToSymbol( xdim0_update_halo_kernel2_zvel_plus_2_bot, &xdim0, sizeof(int) ); xdim0_update_halo_kernel2_zvel_plus_2_bot_h = xdim0; cudaMemcpyToSymbol( ydim0_update_halo_kernel2_zvel_plus_2_bot, &ydim0, sizeof(int) ); ydim0_update_halo_kernel2_zvel_plus_2_bot_h = ydim0; cudaMemcpyToSymbol( xdim1_update_halo_kernel2_zvel_plus_2_bot, &xdim1, sizeof(int) ); xdim1_update_halo_kernel2_zvel_plus_2_bot_h = xdim1; cudaMemcpyToSymbol( ydim1_update_halo_kernel2_zvel_plus_2_bot, &ydim1, sizeof(int) ); ydim1_update_halo_kernel2_zvel_plus_2_bot_h = ydim1; } int *arg2h = (int *)arg2.data; dim3 grid( (x_size-1)/OPS_block_size_x+ 1, (y_size-1)/OPS_block_size_y + 1, z_size); dim3 tblock(OPS_block_size_x,OPS_block_size_y,1); int consts_bytes = 0; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); reallocConstArrays(consts_bytes); consts_bytes = 0; arg2.data = OPS_consts_h + consts_bytes; arg2.data_d = OPS_consts_d + consts_bytes; for (int d=0; d<NUM_FIELDS; d++) ((int *)arg2.data)[d] = arg2h[d]; consts_bytes += ROUND_UP(NUM_FIELDS*sizeof(int)); mvConstArraysToDevice(consts_bytes); int dat0 = args[0].dat->elem_size; int dat1 = args[1].dat->elem_size; char *p_a[3]; //set up initial pointers int d_m[OPS_MAX_DIM]; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d] + OPS_sub_dat_list[args[0].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[0].dat->d_m[d]; #endif //OPS_MPI int base0 = dat0 * 1 * (start[0] * args[0].stencil->stride[0] - args[0].dat->base[0] - d_m[0]); base0 = base0+ dat0 * args[0].dat->size[0] * (start[1] * args[0].stencil->stride[1] - args[0].dat->base[1] - d_m[1]); base0 = base0+ dat0 * args[0].dat->size[0] * args[0].dat->size[1] * (start[2] * args[0].stencil->stride[2] - args[0].dat->base[2] - d_m[2]); p_a[0] = (char *)args[0].data_d + base0; #ifdef OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d] + OPS_sub_dat_list[args[1].dat->index]->d_im[d]; #else //OPS_MPI for (int d = 0; d < dim; d++) d_m[d] = args[1].dat->d_m[d]; #endif //OPS_MPI int base1 = dat1 * 1 * (start[0] * args[1].stencil->stride[0] - args[1].dat->base[0] - d_m[0]); base1 = base1+ dat1 * args[1].dat->size[0] * (start[1] * args[1].stencil->stride[1] - args[1].dat->base[1] - d_m[1]); base1 = base1+ dat1 * args[1].dat->size[0] * args[1].dat->size[1] * (start[2] * args[1].stencil->stride[2] - args[1].dat->base[2] - d_m[2]); p_a[1] = (char *)args[1].data_d + base1; ops_H_D_exchanges_device(args, 3); ops_halo_exchanges(args,3,range); ops_timers_core(&c1,&t1); OPS_kernels[78].mpi_time += t1-t2; //call kernel wrapper function, passing in pointers to data ops_update_halo_kernel2_zvel_plus_2_bot<<<grid, tblock >>> ( (double *)p_a[0], (double *)p_a[1], (int *)arg2.data_d,x_size, y_size, z_size); if (OPS_diags>1) { cutilSafeCall(cudaDeviceSynchronize()); } ops_timers_core(&c2,&t2); OPS_kernels[78].time += t2-t1; ops_set_dirtybit_device(args, 3); ops_set_halo_dirtybit3(&args[0],range); ops_set_halo_dirtybit3(&args[1],range); //Update kernel record OPS_kernels[78].transfer += ops_compute_transfer(dim, range, &arg0); OPS_kernels[78].transfer += ops_compute_transfer(dim, range, &arg1); }
f1d6bc5e59f5f87b9da604e12aa0909b621a17f7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"
#include "Indice2D.h"
#include "cudaTools.h"
#include "Device.h"
#include "IndiceTools_GPU.h"

#include "RipplingMath.h"

using namespace gpu;

// Note: the choice of file name is important!
// VagueDevice.cu, not Vague.cu
// In the latter case there is a linkage problem, because the .cu file has the same name as a (host) .cpp file.
// We therefore append Device (or anything else) so that the names differ!

/*----------------------------------------------------------------------*\
 |* Declaration *|
 \*---------------------------------------------------------------------*/

/*--------------------------------------*\
 |* Imported *|
 \*-------------------------------------*/

/*--------------------------------------*\
 |* Public *|
 \*-------------------------------------*/

__global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t);

/*--------------------------------------*\
 |* Private *|
 \*-------------------------------------*/

/*----------------------------------------------------------------------*\
 |* Implementation *|
 \*---------------------------------------------------------------------*/

/*--------------------------------------*\
 |* Public *|
 \*-------------------------------------*/

__global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t)
{
    RipplingMath ripplingMath = RipplingMath(w, h);

    const int TID = Indice2D::tid();
    const int NB_THREAD = Indice2D::nbThread();
    const int WH = w * h;

    uchar4 color;

    int pixelI; // in [0,h[
    int pixelJ; // in [0,w[

    int s = TID;
    while (s < WH)
    {
        IndiceTools::toIJ(s, w, &pixelI, &pixelJ); // update (pixelI, pixelJ)

        ripplingMath.colorIJ(&color, pixelI, pixelJ, t); // update color

        ptrDevPixels[s] = color;

        s += NB_THREAD;
    }
}

/*--------------------------------------*\
 |* Private *|
 \*-------------------------------------*/

/*----------------------------------------------------------------------*\
 |* End *|
 \*---------------------------------------------------------------------*/
f1d6bc5e59f5f87b9da604e12aa0909b621a17f7.cu
#include "Indice2D.h" #include "cudaTools.h" #include "Device.h" #include "IndiceTools_GPU.h" #include "RipplingMath.h" using namespace gpu; // Attention : Choix du nom est impotant! // VagueDevice.cu et non Vague.cu // Dans ce dernier cas, probl�me de linkage, car le nom du .cu est le meme que le nom d'un .cpp (host) // On a donc ajouter Device (ou n'importequoi) pour que les noms soient diff�rents! /*----------------------------------------------------------------------*\ |* Declaration *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Imported *| \*-------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t); /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* Implementation *| \*---------------------------------------------------------------------*/ /*--------------------------------------*\ |* Public *| \*-------------------------------------*/ __global__ void rippling(uchar4* ptrDevPixels, uint w, uint h, float t) { RipplingMath ripplingMath = RipplingMath(w, h); const int TID = Indice2D::tid(); const int NB_THREAD = Indice2D::nbThread(); const int WH = w * h; uchar4 color; int pixelI; // in [0,h[ int pixelJ; // in [0,w[ int s = TID; while (s < WH) { IndiceTools::toIJ(s, w, &pixelI, &pixelJ); // update (pixelI, pixelJ) ripplingMath.colorIJ(&color,pixelI, pixelJ, t); // update color ptrDevPixels[s] = color; s += NB_THREAD; } } /*--------------------------------------*\ |* Private *| \*-------------------------------------*/ /*----------------------------------------------------------------------*\ |* End *| \*---------------------------------------------------------------------*/
a393cdc7e307ebe9be77230d411a5c6b278dc93d.hip
// !!! This is a file automatically generated by hipify!!! #include <hip/hip_runtime.h> #include <stdio.h> #include <stdlib.h> #include <time.h> __global__ void sumArrays(double* dA, double* dB, double* dC) { int ind = blockDim.x * blockIdx.x + threadIdx.x; dC[ind] = dA[ind] + dB[ind]; } int int_power(int base, int exponent) { int result = 1; for (int i = 0; i < exponent; i++) result *= base; return result; } double randBetween(int low, int high) { double result = (double)rand() / (double)RAND_MAX * (high - low) + low; return result; } int main(int argc, char* argv[]) { if (argc != 3) { printf("Usage: ./p2 log2(N) nthreads\n"); exit(1); } int exponent = atoi(argv[1]); int N = int_power(2, exponent); // Number of random numbers int nthreads = atoi(argv[2]); // Number of threads per block int nblocks = N / nthreads; // Number of blocks // Allocate host arrays int bytes = sizeof(double) * N; double* hA, *hB, *hC, *refC, *difC; hA = (double*)malloc(bytes); hB = (double*)malloc(bytes); hC = (double*)malloc(bytes); refC = (double*)malloc(bytes); difC = (double*)malloc(bytes); // Allocate device arrays double* dA, *dB, *dC; hipMalloc(&dA, bytes); hipMalloc(&dB, bytes); hipMalloc(&dC, bytes); // Fill host arrays with random numbers between -10 and 10 and sum them for // reference srand(1443740650); for (int i = 0; i < N; i++) { hA[i] = randBetween(-10, 10); hB[i] = randBetween(-10, 10); refC[i] = hA[i] + hB[i]; } // Set up timing struct timespec start_in, end_in; float duration_ex; double duration_in, duration_ex_total; double duration_in_total = 0.0; long duration_in_ns; int num_runs = 0; // Loop until the total inclusive duration exceeds 1 second while (duration_in_total < 1000.0) { duration_ex_total = 0.0; duration_in_total = 0.0; // Double the number of runs if (num_runs != 0) num_runs *= 2; else num_runs = 1; for (int i = 0; i < num_runs; i++) { // CUDA timing variables hipEvent_t start_ex, end_ex; hipEventCreate(&start_ex); hipEventCreate(&end_ex); // Start inclusive timing clock_gettime(CLOCK_MONOTONIC, &start_in); // Copy host arrays to the device hipMemcpy(dA, hA, bytes, hipMemcpyHostToDevice); hipMemcpy(dB, hB, bytes, hipMemcpyHostToDevice); // Start exclusive timing hipEventRecord(start_ex, 0); // Invoke the device kernel which sums the arrays hipLaunchKernelGGL(( sumArrays) , dim3(nblocks), dim3(nthreads), 0, 0, dA, dB, dC); // End exclusive timing hipEventRecord(end_ex, 0); hipEventSynchronize(end_ex); // Copy the sum array back to the host hipMemcpy(hC, dC, bytes, hipMemcpyDeviceToHost); // End inclusive timing clock_gettime(CLOCK_MONOTONIC, &end_in); // Calculate durations hipEventElapsedTime(&duration_ex, start_ex, end_ex); hipEventDestroy(start_ex); hipEventDestroy(end_ex); duration_in_ns = (end_in.tv_sec - start_in.tv_sec) * 1000000000L + end_in.tv_nsec - start_in.tv_nsec; duration_in = (double)(duration_in_ns / 1000000.0); duration_ex_total += (double)duration_ex; duration_in_total += duration_in; } } // Calculate average durations over all runs duration_ex = duration_ex_total / num_runs; duration_in = duration_in_total / num_runs; // Calculate the difference between the sum arrays and find the maximum // absolute difference double max_dif = 0.0; for (int i = 0; i < N; i++) { difC[i] = hC[i] - refC[i]; if (abs(difC[i]) > max_dif) max_dif = abs(difC[i]); } // Free memory free(hA); free(hB); free(hC); free(refC); free(difC); hipFree(dA); hipFree(dB); hipFree(dC); // Print some information printf("Number of integers: %12d\n", N); printf("Threads per block: %12d\n", nthreads); 
printf("Number of runs: %12d\n", num_runs); printf("Maximum difference: %12.6e\n", max_dif); printf("Exclusive time: %12.6e ms\n", duration_ex); printf("Inclusive time: %12.6e ms\n", duration_in); printf("\n"); return 0; }
a393cdc7e307ebe9be77230d411a5c6b278dc93d.cu
#include <cuda.h> #include <stdio.h> #include <stdlib.h> #include <time.h> __global__ void sumArrays(double* dA, double* dB, double* dC) { int ind = blockDim.x * blockIdx.x + threadIdx.x; dC[ind] = dA[ind] + dB[ind]; } int int_power(int base, int exponent) { int result = 1; for (int i = 0; i < exponent; i++) result *= base; return result; } double randBetween(int low, int high) { double result = (double)rand() / (double)RAND_MAX * (high - low) + low; return result; } int main(int argc, char* argv[]) { if (argc != 3) { printf("Usage: ./p2 log2(N) nthreads\n"); exit(1); } int exponent = atoi(argv[1]); int N = int_power(2, exponent); // Number of random numbers int nthreads = atoi(argv[2]); // Number of threads per block int nblocks = N / nthreads; // Number of blocks // Allocate host arrays int bytes = sizeof(double) * N; double* hA, *hB, *hC, *refC, *difC; hA = (double*)malloc(bytes); hB = (double*)malloc(bytes); hC = (double*)malloc(bytes); refC = (double*)malloc(bytes); difC = (double*)malloc(bytes); // Allocate device arrays double* dA, *dB, *dC; cudaMalloc(&dA, bytes); cudaMalloc(&dB, bytes); cudaMalloc(&dC, bytes); // Fill host arrays with random numbers between -10 and 10 and sum them for // reference srand(1443740650); for (int i = 0; i < N; i++) { hA[i] = randBetween(-10, 10); hB[i] = randBetween(-10, 10); refC[i] = hA[i] + hB[i]; } // Set up timing struct timespec start_in, end_in; float duration_ex; double duration_in, duration_ex_total; double duration_in_total = 0.0; long duration_in_ns; int num_runs = 0; // Loop until the total inclusive duration exceeds 1 second while (duration_in_total < 1000.0) { duration_ex_total = 0.0; duration_in_total = 0.0; // Double the number of runs if (num_runs != 0) num_runs *= 2; else num_runs = 1; for (int i = 0; i < num_runs; i++) { // CUDA timing variables cudaEvent_t start_ex, end_ex; cudaEventCreate(&start_ex); cudaEventCreate(&end_ex); // Start inclusive timing clock_gettime(CLOCK_MONOTONIC, &start_in); // Copy host arrays to the device cudaMemcpy(dA, hA, bytes, cudaMemcpyHostToDevice); cudaMemcpy(dB, hB, bytes, cudaMemcpyHostToDevice); // Start exclusive timing cudaEventRecord(start_ex, 0); // Invoke the device kernel which sums the arrays sumArrays <<<nblocks, nthreads>>> (dA, dB, dC); // End exclusive timing cudaEventRecord(end_ex, 0); cudaEventSynchronize(end_ex); // Copy the sum array back to the host cudaMemcpy(hC, dC, bytes, cudaMemcpyDeviceToHost); // End inclusive timing clock_gettime(CLOCK_MONOTONIC, &end_in); // Calculate durations cudaEventElapsedTime(&duration_ex, start_ex, end_ex); cudaEventDestroy(start_ex); cudaEventDestroy(end_ex); duration_in_ns = (end_in.tv_sec - start_in.tv_sec) * 1000000000L + end_in.tv_nsec - start_in.tv_nsec; duration_in = (double)(duration_in_ns / 1000000.0); duration_ex_total += (double)duration_ex; duration_in_total += duration_in; } } // Calculate average durations over all runs duration_ex = duration_ex_total / num_runs; duration_in = duration_in_total / num_runs; // Calculate the difference between the sum arrays and find the maximum // absolute difference double max_dif = 0.0; for (int i = 0; i < N; i++) { difC[i] = hC[i] - refC[i]; if (abs(difC[i]) > max_dif) max_dif = abs(difC[i]); } // Free memory free(hA); free(hB); free(hC); free(refC); free(difC); cudaFree(dA); cudaFree(dB); cudaFree(dC); // Print some information printf("Number of integers: %12d\n", N); printf("Threads per block: %12d\n", nthreads); printf("Number of runs: %12d\n", num_runs); printf("Maximum difference: %12.6e\n", 
max_dif); printf("Exclusive time: %12.6e ms\n", duration_ex); printf("Inclusive time: %12.6e ms\n", duration_in); printf("\n"); return 0; }
85d9e1157d854935e3339a33f00a7ca8ae7cea59.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <hiprand/hiprand_kernel.h> #include <stdlib.h> #include <hip/hip_runtime.h> #include <sys/time.h> #include "transpose_relu.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { hipSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *odata = NULL; hipMalloc(&odata, XSIZE*YSIZE); float *idata = NULL; hipMalloc(&idata, XSIZE*YSIZE); int width = XSIZE; int height = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); hipFree(0);hipLaunchKernelGGL(( transpose_relu), dim3(gridBlock),dim3(threadBlock), 0, 0, odata,idata,width,height); hipDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) {hipLaunchKernelGGL(( transpose_relu), dim3(gridBlock),dim3(threadBlock), 0, 0, odata,idata,width,height); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) {hipLaunchKernelGGL(( transpose_relu), dim3(gridBlock),dim3(threadBlock), 0, 0, odata,idata,width,height); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
85d9e1157d854935e3339a33f00a7ca8ae7cea59.cu
#include <stdbool.h> #include <stdio.h> #include <string.h> #include <getopt.h> #include <curand_kernel.h> #include <stdlib.h> #include <cuda.h> #include <sys/time.h> #include "transpose_relu.cu" #include<chrono> #include<iostream> using namespace std; using namespace std::chrono; int blocks_[20][2] = {{8,8},{16,16},{24,24},{32,32},{1,64},{1,128},{1,192},{1,256},{1,320},{1,384},{1,448},{1,512},{1,576},{1,640},{1,704},{1,768},{1,832},{1,896},{1,960},{1,1024}}; int matrices_[7][2] = {{240,240},{496,496},{784,784},{1016,1016},{1232,1232},{1680,1680},{2024,2024}}; int main(int argc, char **argv) { cudaSetDevice(0); char* p;int matrix_len=strtol(argv[1], &p, 10); for(int matrix_looper=0;matrix_looper<matrix_len;matrix_looper++){ for(int block_looper=0;block_looper<20;block_looper++){ int XSIZE=matrices_[matrix_looper][0],YSIZE=matrices_[matrix_looper][1],BLOCKX=blocks_[block_looper][0],BLOCKY=blocks_[block_looper][1]; float *odata = NULL; cudaMalloc(&odata, XSIZE*YSIZE); float *idata = NULL; cudaMalloc(&idata, XSIZE*YSIZE); int width = XSIZE; int height = YSIZE; int iXSIZE= XSIZE; int iYSIZE= YSIZE; while(iXSIZE%BLOCKX!=0) { iXSIZE++; } while(iYSIZE%BLOCKY!=0) { iYSIZE++; } dim3 gridBlock(iXSIZE/BLOCKX, iYSIZE/BLOCKY); dim3 threadBlock(BLOCKX, BLOCKY); cudaFree(0); transpose_relu<<<gridBlock,threadBlock>>>(odata,idata,width,height); cudaDeviceSynchronize(); for (int loop_counter = 0; loop_counter < 10; ++loop_counter) { transpose_relu<<<gridBlock,threadBlock>>>(odata,idata,width,height); } auto start = steady_clock::now(); for (int loop_counter = 0; loop_counter < 1000; loop_counter++) { transpose_relu<<<gridBlock,threadBlock>>>(odata,idata,width,height); } auto end = steady_clock::now(); auto usecs = duration_cast<duration<float, microseconds::period> >(end - start); cout <<'['<<usecs.count()<<','<<'('<<BLOCKX<<','<<BLOCKY<<')' << ','<<'('<<XSIZE<<','<<YSIZE<<')'<<']' << endl; } }}
b840d4ca5b63b6772aace2f07ae788ac821849a4.hip
// !!! This is a file automatically generated by hipify!!! // System includes #include <stdio.h> #include <assert.h> #include <malloc.h> #include <math.h> #include <stdlib.h> // CUDA runtime #include <hip/hip_runtime.h> // Helper functions and utilities to work with CUDA #include "helper_functions.h" #include "helper_cuda.h" // setting the number of trials in the monte carlo simulation: #ifndef NUMTRIALS #define NUMTRIALS ( 1024*1024 ) #endif #ifndef BLOCKSIZE #define BLOCKSIZE 32 // number of threads per block #endif #define NUMBLOCKS ( NUMTRIALS / BLOCKSIZE ) // ranges for the random numbers: const float XCMIN = 0.0; const float XCMAX = 2.0; const float YCMIN = 0.0; const float YCMAX = 2.0; const float RMIN = 0.5; const float RMAX = 2.0; // function prototypes: float Ranf( float, float ); int Ranf( int, int ); void TimeOfDaySeed( ); __global__ void MonteCarlo( float *Xcs, float *Ycs, float *Rs, int *Hits ) { unsigned int wgNumber = blockIdx.x; unsigned int wgDimension = blockDim.x; unsigned int threadNum = threadIdx.x; unsigned int gid = wgNumber*wgDimension + threadNum; // all the monte carlo stuff goes in here // if we make it all the way through, then Hits[gid] = 1 // randomize the location and radius of the circle: float xc = Xcs[gid]; float yc = Ycs[gid]; float r = Rs[gid]; float tn = tanf( (float)( (M_PI/180.) * 30. ) ); Hits[gid] = 0; // solve for the intersection using the quadratic formula: float a = 1.0 + tn*tn; float b = -2.0 * (xc + yc*tn); float c = xc*xc + yc*yc - r*r; float d = b*b - 4.0 * a*c; // cascading if-statements: // if you used "continue;" in project #1, change to this style because, // if there is no for-loop, then there is nowhere to continue to // CASE A if( d > 0.0 ) { // hits the circle: // get the first intersection: d = sqrt( d ); float t1 = (-b + d ) / ( 2.*a ); // time to intersect the circle float t2 = (-b - d ) / ( 2.*a ); // time to intersect the circle float tmin = t1 < t2 ? t1 : t2; // only care about the first intersection // CASE B if( tmin > 0.0 ) { // where does it intersect the circle? float xcir = tmin; float ycir = tmin*tn; // get the unitized normal vector at the point of intersection: float nx = xcir - xc; float ny = ycir - yc; float n = sqrt( nx*nx + ny*ny ); nx /= n; // unit vector ny /= n; // unit vector // get the unitized incoming vector: float inx = xcir - 0.; float iny = ycir - 0.; float in = sqrt( inx*inx + iny*iny ); inx /= in; // unit vector iny /= in; // unit vector // get the outgoing (bounced) vector: float dot = inx*nx + iny*ny; //float outx = inx - 2.*nx*dot; // angle of reflection = angle of incidence` float outy = iny - 2.*ny*dot; // angle of reflection = angle of incidence` // find out if it hits the infinite plate: float t = ( 0. - ycir ) / outy; // CASE D if( t >= 0. 
) { Hits[gid] = 1; } } } } // main program: int main( int argc, char* argv[ ] ) { TimeOfDaySeed( ); int dev = findCudaDevice(argc, (const char **)argv); // allocate host memory: float *hXcs = new float[NUMTRIALS]; float *hYcs = new float[NUMTRIALS]; float * hRs = new float[NUMTRIALS]; int *hHits = new int[NUMTRIALS]; // fill the random-value arrays: for( int n = 0; n < NUMTRIALS; n++ ) { hXcs[n] = Ranf( XCMIN, XCMAX ); hYcs[n] = Ranf( YCMIN, YCMAX ); hRs[n] = Ranf( RMIN, RMAX ); } // allocate device memory: float *dXcs, *dYcs, *dRs; int *dHits; dim3 dimsXcs( NUMTRIALS, 1, 1 ); dim3 dimsYcs( NUMTRIALS, 1, 1 ); dim3 dimsRs( NUMTRIALS, 1, 1 ); dim3 dimsHits( NUMTRIALS, 1, 1 ); hipError_t status; status = hipMalloc( (void **)(&dXcs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = hipMalloc( (void **)(&dYcs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = hipMalloc( (void **)(&dRs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = hipMalloc( (void **)(&dHits), NUMTRIALS *sizeof(int) ); checkCudaErrors( status ); // copy host memory to the device: status = hipMemcpy( dXcs, hXcs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice ); checkCudaErrors( status ); status = hipMemcpy( dYcs, hYcs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice ); checkCudaErrors( status ); status = hipMemcpy( dRs, hRs, NUMTRIALS*sizeof(float), hipMemcpyHostToDevice ); checkCudaErrors( status ); // setup the execution parameters: dim3 threads(BLOCKSIZE, 1, 1 ); dim3 grid(NUMBLOCKS, 1, 1 ); // create and start timer hipDeviceSynchronize( ); // allocate CUDA events that we'll use for timing: hipEvent_t start, stop; status = hipEventCreate( &start ); checkCudaErrors( status ); status = hipEventCreate( &stop ); checkCudaErrors( status ); // record the start event: status = hipEventRecord( start, NULL ); checkCudaErrors( status ); // execute the kernel: hipLaunchKernelGGL(( MonteCarlo), dim3(grid), dim3(threads) , 0, 0, dXcs, dYcs, dRs, dHits ); // record the stop event: status = hipEventRecord( stop, NULL ); checkCudaErrors( status ); // wait for the stop event to complete: status = hipEventSynchronize( stop ); checkCudaErrors( status ); float msecTotal = 0.0f; status = hipEventElapsedTime( &msecTotal, start, stop ); checkCudaErrors( status ); // compute and print the performance double secondsTotal = 0.001 * (double)msecTotal; double trialsPerSecond = (float)NUMTRIALS / secondsTotal; double megaTrialsPerSecond = trialsPerSecond / 1000000.; fprintf( stderr, "Number of Trials = %10d, MegaTrials/Second = %10.4lf\n", NUMTRIALS, megaTrialsPerSecond ); // copy result from the device to the host: status = hipMemcpy( hHits, dHits, NUMTRIALS *sizeof(int), hipMemcpyDeviceToHost ); checkCudaErrors( status ); hipDeviceSynchronize( ); // compute the probability: int numHits = 0; for(int i = 0; i < NUMTRIALS; i++ ) { numHits += hHits[i]; } float probability = 100.f * (float)numHits / (float)NUMTRIALS; fprintf(stderr, "\nProbability = %6.3f %%\n", probability ); // clean up memory: delete [ ] hXcs; delete [ ] hYcs; delete [ ] hRs; delete [ ] hHits; status = hipFree( dXcs ); status = hipFree( dYcs ); status = hipFree( dRs ); status = hipFree( dHits ); checkCudaErrors( status ); return 0; } float Ranf( float low, float high ) { float r = (float) rand(); // 0 - RAND_MAX float t = r / (float) RAND_MAX; // 0. - 1. 
return low + t * ( high - low ); } int Ranf( int ilow, int ihigh ) { float low = (float)ilow; float high = ceil( (float)ihigh ); return (int) Ranf(low,high); } void TimeOfDaySeed( ) { struct tm y2k = { 0 }; y2k.tm_hour = 0; y2k.tm_min = 0; y2k.tm_sec = 0; y2k.tm_year = 100; y2k.tm_mon = 0; y2k.tm_mday = 1; time_t timer; time( &timer ); double seconds = difftime( timer, mktime(&y2k) ); unsigned int seed = (unsigned int)( 1000.*seconds ); // milliseconds srand( seed ); }
b840d4ca5b63b6772aace2f07ae788ac821849a4.cu
// System includes #include <stdio.h> #include <assert.h> #include <malloc.h> #include <math.h> #include <stdlib.h> // CUDA runtime #include <cuda_runtime.h> // Helper functions and utilities to work with CUDA #include "helper_functions.h" #include "helper_cuda.h" // setting the number of trials in the monte carlo simulation: #ifndef NUMTRIALS #define NUMTRIALS ( 1024*1024 ) #endif #ifndef BLOCKSIZE #define BLOCKSIZE 32 // number of threads per block #endif #define NUMBLOCKS ( NUMTRIALS / BLOCKSIZE ) // ranges for the random numbers: const float XCMIN = 0.0; const float XCMAX = 2.0; const float YCMIN = 0.0; const float YCMAX = 2.0; const float RMIN = 0.5; const float RMAX = 2.0; // function prototypes: float Ranf( float, float ); int Ranf( int, int ); void TimeOfDaySeed( ); __global__ void MonteCarlo( float *Xcs, float *Ycs, float *Rs, int *Hits ) { unsigned int wgNumber = blockIdx.x; unsigned int wgDimension = blockDim.x; unsigned int threadNum = threadIdx.x; unsigned int gid = wgNumber*wgDimension + threadNum; // all the monte carlo stuff goes in here // if we make it all the way through, then Hits[gid] = 1 // randomize the location and radius of the circle: float xc = Xcs[gid]; float yc = Ycs[gid]; float r = Rs[gid]; float tn = tanf( (float)( (M_PI/180.) * 30. ) ); Hits[gid] = 0; // solve for the intersection using the quadratic formula: float a = 1.0 + tn*tn; float b = -2.0 * (xc + yc*tn); float c = xc*xc + yc*yc - r*r; float d = b*b - 4.0 * a*c; // cascading if-statements: // if you used "continue;" in project #1, change to this style because, // if there is no for-loop, then there is nowhere to continue to // CASE A if( d > 0.0 ) { // hits the circle: // get the first intersection: d = sqrt( d ); float t1 = (-b + d ) / ( 2.*a ); // time to intersect the circle float t2 = (-b - d ) / ( 2.*a ); // time to intersect the circle float tmin = t1 < t2 ? t1 : t2; // only care about the first intersection // CASE B if( tmin > 0.0 ) { // where does it intersect the circle? float xcir = tmin; float ycir = tmin*tn; // get the unitized normal vector at the point of intersection: float nx = xcir - xc; float ny = ycir - yc; float n = sqrt( nx*nx + ny*ny ); nx /= n; // unit vector ny /= n; // unit vector // get the unitized incoming vector: float inx = xcir - 0.; float iny = ycir - 0.; float in = sqrt( inx*inx + iny*iny ); inx /= in; // unit vector iny /= in; // unit vector // get the outgoing (bounced) vector: float dot = inx*nx + iny*ny; //float outx = inx - 2.*nx*dot; // angle of reflection = angle of incidence` float outy = iny - 2.*ny*dot; // angle of reflection = angle of incidence` // find out if it hits the infinite plate: float t = ( 0. - ycir ) / outy; // CASE D if( t >= 0. 
) { Hits[gid] = 1; } } } } // main program: int main( int argc, char* argv[ ] ) { TimeOfDaySeed( ); int dev = findCudaDevice(argc, (const char **)argv); // allocate host memory: float *hXcs = new float[NUMTRIALS]; float *hYcs = new float[NUMTRIALS]; float * hRs = new float[NUMTRIALS]; int *hHits = new int[NUMTRIALS]; // fill the random-value arrays: for( int n = 0; n < NUMTRIALS; n++ ) { hXcs[n] = Ranf( XCMIN, XCMAX ); hYcs[n] = Ranf( YCMIN, YCMAX ); hRs[n] = Ranf( RMIN, RMAX ); } // allocate device memory: float *dXcs, *dYcs, *dRs; int *dHits; dim3 dimsXcs( NUMTRIALS, 1, 1 ); dim3 dimsYcs( NUMTRIALS, 1, 1 ); dim3 dimsRs( NUMTRIALS, 1, 1 ); dim3 dimsHits( NUMTRIALS, 1, 1 ); cudaError_t status; status = cudaMalloc( (void **)(&dXcs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = cudaMalloc( (void **)(&dYcs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = cudaMalloc( (void **)(&dRs), NUMTRIALS*sizeof(float) ); checkCudaErrors( status ); status = cudaMalloc( (void **)(&dHits), NUMTRIALS *sizeof(int) ); checkCudaErrors( status ); // copy host memory to the device: status = cudaMemcpy( dXcs, hXcs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice ); checkCudaErrors( status ); status = cudaMemcpy( dYcs, hYcs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice ); checkCudaErrors( status ); status = cudaMemcpy( dRs, hRs, NUMTRIALS*sizeof(float), cudaMemcpyHostToDevice ); checkCudaErrors( status ); // setup the execution parameters: dim3 threads(BLOCKSIZE, 1, 1 ); dim3 grid(NUMBLOCKS, 1, 1 ); // create and start timer cudaDeviceSynchronize( ); // allocate CUDA events that we'll use for timing: cudaEvent_t start, stop; status = cudaEventCreate( &start ); checkCudaErrors( status ); status = cudaEventCreate( &stop ); checkCudaErrors( status ); // record the start event: status = cudaEventRecord( start, NULL ); checkCudaErrors( status ); // execute the kernel: MonteCarlo<<< grid, threads >>>( dXcs, dYcs, dRs, dHits ); // record the stop event: status = cudaEventRecord( stop, NULL ); checkCudaErrors( status ); // wait for the stop event to complete: status = cudaEventSynchronize( stop ); checkCudaErrors( status ); float msecTotal = 0.0f; status = cudaEventElapsedTime( &msecTotal, start, stop ); checkCudaErrors( status ); // compute and print the performance double secondsTotal = 0.001 * (double)msecTotal; double trialsPerSecond = (float)NUMTRIALS / secondsTotal; double megaTrialsPerSecond = trialsPerSecond / 1000000.; fprintf( stderr, "Number of Trials = %10d, MegaTrials/Second = %10.4lf\n", NUMTRIALS, megaTrialsPerSecond ); // copy result from the device to the host: status = cudaMemcpy( hHits, dHits, NUMTRIALS *sizeof(int), cudaMemcpyDeviceToHost ); checkCudaErrors( status ); cudaDeviceSynchronize( ); // compute the probability: int numHits = 0; for(int i = 0; i < NUMTRIALS; i++ ) { numHits += hHits[i]; } float probability = 100.f * (float)numHits / (float)NUMTRIALS; fprintf(stderr, "\nProbability = %6.3f %%\n", probability ); // clean up memory: delete [ ] hXcs; delete [ ] hYcs; delete [ ] hRs; delete [ ] hHits; status = cudaFree( dXcs ); status = cudaFree( dYcs ); status = cudaFree( dRs ); status = cudaFree( dHits ); checkCudaErrors( status ); return 0; } float Ranf( float low, float high ) { float r = (float) rand(); // 0 - RAND_MAX float t = r / (float) RAND_MAX; // 0. - 1. 
return low + t * ( high - low ); } int Ranf( int ilow, int ihigh ) { float low = (float)ilow; float high = ceil( (float)ihigh ); return (int) Ranf(low,high); } void TimeOfDaySeed( ) { struct tm y2k = { 0 }; y2k.tm_hour = 0; y2k.tm_min = 0; y2k.tm_sec = 0; y2k.tm_year = 100; y2k.tm_mon = 0; y2k.tm_mday = 1; time_t timer; time( &timer ); double seconds = difftime( timer, mktime(&y2k) ); unsigned int seed = (unsigned int)( 1000.*seconds ); // milliseconds srand( seed ); }
d1244baf53b7e6ffd0ff3d24e1a8dca161cace78.hip
// !!! This is a file automatically generated by hipify!!! #include <iostream> #include "cs_internal.h" #include "cudalib.h" #include <thrust/transform.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/remove.h> #include <bitset> namespace cu { __constant__ static float squad[10*HASHBITS]; static texture<float4, 1, hipReadModeElementType> spinTex; static texture<hashtype, 1, hipReadModeElementType> coordPartTex; __device__ inline static int getSign(float v0, float v1, float v2, float v3, int i) { i = i * 10; const float s0 = squad[i+0]; const float s1 = squad[i+1]; const float s2 = squad[i+2]; const float s3 = squad[i+3]; const float s4 = squad[i+4]; const float s5 = squad[i+5]; const float s6 = squad[i+6]; const float s7 = squad[i+7]; const float s8 = squad[i+8]; const float s9 = squad[i+9]; const float sum = (s0*v0+s1*v1+s2*v2+s3*v3) * v0 + (s1*v0+s4*v1+s5*v2+s6*v3) * v1 + (s2*v0+s5*v1+s7*v2+s8*v3) * v2 + (s3*v0+s6*v1+s8*v2+s9*v3) * v3; return (signbit(sum)) ? 0 : 1; //return signbit(sum); } struct ComputeHash { u_int32_t count; ComputeHash(size_t s) : count(s) { } __device__ hashtype operator()(const u_int32_t& index) { const float v0=tex1Dfetch(spinTex, index).x; const float v1=tex1Dfetch(spinTex, index).y; const float v2=tex1Dfetch(spinTex, index).z; const float v3=tex1Dfetch(spinTex, index).w; hashtype retval(0); int offset = 0; //for (size_t i=0;i<count;++i) for (u_int32_t i=0;i<count;++i) { retval ^= getSign(v0,v1,v2,v3, i) << offset; offset = (offset+1)& HASH_MASK; } return retval; } }; struct ReorderSpins { __device__ float4 operator()(const u_int32_t& _a) { return tex1Dfetch(spinTex, _a); } }; struct SpinHashChecker { __device__ u_int32_t operator()(const u_int32_t& index, const u_int32_t& value) { if (index == 0) return 0; u_int32_t v0 = tex1Dfetch(coordPartTex, index-1); u_int32_t v1 = tex1Dfetch(coordPartTex, index); if (v0 == v1) return value+1; return value; } }; struct SpinIndexCleaner { u_int32_t count; SpinIndexCleaner(size_t s) : count(s) { } __device__ bool operator()(const u_int32_t& index) { u_int32_t value = tex1Dfetch(coordPartTex, index); return value == count; } }; void compute_hash_part(thrust::device_vector<float4>& spins, const std::vector<float>& spinquadrics, thrust::device_vector<hashtype>& hashPart, int i, size_t& rem) { size_t toCpy = std::min<size_t>(rem, HASHBITS); cutilSafeCall( hipBindTexture(NULL, spinTex, thrust::raw_pointer_cast(spins.data()), sizeof(float4)*spins.size()) ); cutilSafeCall( hipMemcpyToSymbol(squad, thrust::raw_pointer_cast(spinquadrics.data())+i*10*HASHBITS, sizeof(float)*10*toCpy) ); thrust::transform(thrust::counting_iterator<u_int32_t>(0), thrust::counting_iterator<u_int32_t>(spins.size()), hashPart.begin(), ComputeHash(toCpy)); cutilSafeCall( hipUnbindTexture(spinTex) ); } void make_unique_spins(thrust::device_vector<float4>& spins, const std::vector<float>& spinquadrics, size_t spinquadCount) { size_t parts = inc_div<size_t>(spinquadrics.size()/10, HASHBITS); /* * - Sort & compute parts * - Reduce & compute parts */ thrust::device_vector<hashtype> hashPart(spins.size()); { size_t partsSizes; size_t rem = spinquadCount; for (int i=0;i<parts; ++i) { partsSizes = std::min<size_t>(rem, HASHBITS); rem -= HASHBITS; compute_hash_part(spins, spinquadrics, hashPart, i, partsSizes); thrust::stable_sort_by_key(hashPart.begin(), hashPart.end(), spins.begin()); } } //Make unique //Algo: Check if elem before has the same hash //if yes, +1 //Remove all that has counter equal to part count { 
thrust::device_vector<u_int32_t> hash_counter(spins.size(), 0); thrust::device_vector<u_int32_t> elemsIds(spins.size()); thrust::sequence(elemsIds.begin(), elemsIds.end()); size_t partsSizes; size_t rem = spinquadCount; for (int i=0;i<parts; ++i) { partsSizes = std::min<size_t>(rem, HASHBITS); rem -= HASHBITS; compute_hash_part(spins, spinquadrics, hashPart, i, partsSizes); cutilSafeCall( hipBindTexture(NULL, coordPartTex, thrust::raw_pointer_cast(hashPart.data()), sizeof(hashtype)*hashPart.size()) ); thrust::transform(thrust::counting_iterator<u_int32_t>(0), thrust::counting_iterator<u_int32_t>(spins.size()), hash_counter.begin(), hash_counter.begin(), SpinHashChecker()); cutilSafeCall( hipUnbindTexture(coordPartTex) ); } thrust::sequence(elemsIds.begin(), elemsIds.end()); cutilSafeCall( hipBindTexture(NULL, coordPartTex, thrust::raw_pointer_cast(hash_counter.data()), sizeof(hashtype)*hash_counter.size()) ); elemsIds.resize(thrust::remove_if(elemsIds.begin(), elemsIds.end(), SpinIndexCleaner(parts)) - elemsIds.begin()); cutilSafeCall( hipUnbindTexture(coordPartTex) ); //std::cerr << "Remaining: " << elemsIds.size() << std::endl; //Final Reorder thrust::device_vector<float4> final_spins(elemsIds.size()); cutilSafeCall( hipBindTexture(NULL, spinTex, thrust::raw_pointer_cast(spins.data()), sizeof(float4)*spins.size()) ); thrust::transform(elemsIds.begin(), elemsIds.end(), final_spins.begin(), ReorderSpins()); cutilSafeCall( hipUnbindTexture(spinTex) ); spins.swap(final_spins); } } }
d1244baf53b7e6ffd0ff3d24e1a8dca161cace78.cu
#include <iostream> #include "cs_internal.h" #include "cudalib.h" #include <thrust/transform.h> #include <thrust/device_vector.h> #include <thrust/sort.h> #include <thrust/remove.h> #include <bitset> namespace cu { __constant__ static float squad[10*HASHBITS]; static texture<float4, 1, cudaReadModeElementType> spinTex; static texture<hashtype, 1, cudaReadModeElementType> coordPartTex; __device__ inline static int getSign(float v0, float v1, float v2, float v3, int i) { i = i * 10; const float s0 = squad[i+0]; const float s1 = squad[i+1]; const float s2 = squad[i+2]; const float s3 = squad[i+3]; const float s4 = squad[i+4]; const float s5 = squad[i+5]; const float s6 = squad[i+6]; const float s7 = squad[i+7]; const float s8 = squad[i+8]; const float s9 = squad[i+9]; const float sum = (s0*v0+s1*v1+s2*v2+s3*v3) * v0 + (s1*v0+s4*v1+s5*v2+s6*v3) * v1 + (s2*v0+s5*v1+s7*v2+s8*v3) * v2 + (s3*v0+s6*v1+s8*v2+s9*v3) * v3; return (signbit(sum)) ? 0 : 1; //return signbit(sum); } struct ComputeHash { u_int32_t count; ComputeHash(size_t s) : count(s) { } __device__ hashtype operator()(const u_int32_t& index) { const float v0=tex1Dfetch(spinTex, index).x; const float v1=tex1Dfetch(spinTex, index).y; const float v2=tex1Dfetch(spinTex, index).z; const float v3=tex1Dfetch(spinTex, index).w; hashtype retval(0); int offset = 0; //for (size_t i=0;i<count;++i) for (u_int32_t i=0;i<count;++i) { retval ^= getSign(v0,v1,v2,v3, i) << offset; offset = (offset+1)& HASH_MASK; } return retval; } }; struct ReorderSpins { __device__ float4 operator()(const u_int32_t& _a) { return tex1Dfetch(spinTex, _a); } }; struct SpinHashChecker { __device__ u_int32_t operator()(const u_int32_t& index, const u_int32_t& value) { if (index == 0) return 0; u_int32_t v0 = tex1Dfetch(coordPartTex, index-1); u_int32_t v1 = tex1Dfetch(coordPartTex, index); if (v0 == v1) return value+1; return value; } }; struct SpinIndexCleaner { u_int32_t count; SpinIndexCleaner(size_t s) : count(s) { } __device__ bool operator()(const u_int32_t& index) { u_int32_t value = tex1Dfetch(coordPartTex, index); return value == count; } }; void compute_hash_part(thrust::device_vector<float4>& spins, const std::vector<float>& spinquadrics, thrust::device_vector<hashtype>& hashPart, int i, size_t& rem) { size_t toCpy = std::min<size_t>(rem, HASHBITS); cutilSafeCall( cudaBindTexture(NULL, spinTex, thrust::raw_pointer_cast(spins.data()), sizeof(float4)*spins.size()) ); cutilSafeCall( cudaMemcpyToSymbol(squad, thrust::raw_pointer_cast(spinquadrics.data())+i*10*HASHBITS, sizeof(float)*10*toCpy) ); thrust::transform(thrust::counting_iterator<u_int32_t>(0), thrust::counting_iterator<u_int32_t>(spins.size()), hashPart.begin(), ComputeHash(toCpy)); cutilSafeCall( cudaUnbindTexture(spinTex) ); } void make_unique_spins(thrust::device_vector<float4>& spins, const std::vector<float>& spinquadrics, size_t spinquadCount) { size_t parts = inc_div<size_t>(spinquadrics.size()/10, HASHBITS); /* * - Sort & compute parts * - Reduce & compute parts */ thrust::device_vector<hashtype> hashPart(spins.size()); { size_t partsSizes; size_t rem = spinquadCount; for (int i=0;i<parts; ++i) { partsSizes = std::min<size_t>(rem, HASHBITS); rem -= HASHBITS; compute_hash_part(spins, spinquadrics, hashPart, i, partsSizes); thrust::stable_sort_by_key(hashPart.begin(), hashPart.end(), spins.begin()); } } //Make unique //Algo: Check if elem before has the same hash //if yes, +1 //Remove all that has counter equal to part count { thrust::device_vector<u_int32_t> hash_counter(spins.size(), 0); 
thrust::device_vector<u_int32_t> elemsIds(spins.size()); thrust::sequence(elemsIds.begin(), elemsIds.end()); size_t partsSizes; size_t rem = spinquadCount; for (int i=0;i<parts; ++i) { partsSizes = std::min<size_t>(rem, HASHBITS); rem -= HASHBITS; compute_hash_part(spins, spinquadrics, hashPart, i, partsSizes); cutilSafeCall( cudaBindTexture(NULL, coordPartTex, thrust::raw_pointer_cast(hashPart.data()), sizeof(hashtype)*hashPart.size()) ); thrust::transform(thrust::counting_iterator<u_int32_t>(0), thrust::counting_iterator<u_int32_t>(spins.size()), hash_counter.begin(), hash_counter.begin(), SpinHashChecker()); cutilSafeCall( cudaUnbindTexture(coordPartTex) ); } thrust::sequence(elemsIds.begin(), elemsIds.end()); cutilSafeCall( cudaBindTexture(NULL, coordPartTex, thrust::raw_pointer_cast(hash_counter.data()), sizeof(hashtype)*hash_counter.size()) ); elemsIds.resize(thrust::remove_if(elemsIds.begin(), elemsIds.end(), SpinIndexCleaner(parts)) - elemsIds.begin()); cutilSafeCall( cudaUnbindTexture(coordPartTex) ); //std::cerr << "Remaining: " << elemsIds.size() << std::endl; //Final Reorder thrust::device_vector<float4> final_spins(elemsIds.size()); cutilSafeCall( cudaBindTexture(NULL, spinTex, thrust::raw_pointer_cast(spins.data()), sizeof(float4)*spins.size()) ); thrust::transform(elemsIds.begin(), elemsIds.end(), final_spins.begin(), ReorderSpins()); cutilSafeCall( cudaUnbindTexture(spinTex) ); spins.swap(final_spins); } } }
ee0e51b6d7a8d974cdace79385153bae4d8227c7.hip
// !!! This is a file automatically generated by hipify!!!
#include "hip/hip_runtime.h"

extern "C" __global__ __launch_bounds__(64) void sconv_bprop_C64_N64(
    float *param_test, float *param_O, const float *param_I, const float *param_F,
    float param_alpha,
    int param_N, int param_K, int param_D, int param_H, int param_W,
    int param_WN, int param_HWN, int param_DHWN,
    int param_C, int param_KRST, int param_RST, int param_RS,
    int param_magic_RS, int param_shift_RS,
    int param_S, int param_magic_S, int param_shift_S,
    int param_pad_d, int param_pad_h, int param_pad_w,
    int param_str_d, int param_str_h, int param_str_w,
    int param_Q, int param_PQ, int param_QN, int param_PQN, int param_MPQN,
    int param_magic_Q, int param_shift_Q,
    int param_magic_PQ, int param_shift_PQ,
    int param_R, int param_T,
    int param_magic_str_w, int param_shift_str_w,
    int param_magic_str_h, int param_shift_str_h,
    int param_magic_str_d, int param_shift_str_d)
{
    __shared__ float share[64 * 8 * 4 + 8];

    int tid = threadIdx.x;

    share[tid] = 1;
}
ee0e51b6d7a8d974cdace79385153bae4d8227c7.cu
extern "C" __global__ __launch_bounds__(64) void sconv_bprop_C64_N64( float *param_test, float *param_O, const float *param_I, const float *param_F, float param_alpha, int param_N, int param_K, int param_D, int param_H, int param_W, int param_WN, int param_HWN, int param_DHWN, int param_C, int param_KRST, int param_RST, int param_RS, int param_magic_RS, int param_shift_RS, int param_S, int param_magic_S, int param_shift_S, int param_pad_d, int param_pad_h, int param_pad_w, int param_str_d, int param_str_h, int param_str_w, int param_Q, int param_PQ, int param_QN, int param_PQN, int param_MPQN, int param_magic_Q, int param_shift_Q, int param_magic_PQ, int param_shift_PQ, int param_R, int param_T, int param_magic_str_w, int param_shift_str_w, int param_magic_str_h, int param_shift_str_h, int param_magic_str_d, int param_shift_str_d) { __shared__ float share[64 * 8 * 4 + 8]; int tid = threadIdx.x; share[tid] = 1; }
33f606fa9af66342cecc506f3102cff076925bf0.hip
// !!! This is a file automatically generated by hipify!!!
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/hip/HIPContext.h>
#include <hip/hip_runtime.h>

#include "macros.h"
#include "cuda_utils.h"

__global__ void grid_interp_cuda_kernel(
    const torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> vol,
    const torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> points,
    torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> output,
    int channels, int3 nGrids, size_t size) {
    const int tx = blockIdx.x * blockDim.x + threadIdx.x;
    const int ty = blockIdx.y * blockDim.y + threadIdx.y;
    const int tz = blockIdx.z * blockDim.z + threadIdx.z;
    const int index = (tz * blockDim.y * gridDim.y + ty) * blockDim.x * gridDim.x + tx;
    if (index >= size) {
        return;
    }

    const float x = points[index][0];
    const float y = points[index][1];
    const float z = points[index][2];
    const int ix = (int)x;
    const int iy = (int)y;
    const int iz = (int)z;
    const float fx = x - ix;
    const float fy = y - iy;
    const float fz = z - iz;

    for (int c = 0; c < channels; c++) {
        const int x0 = max(0, min(ix, nGrids.x - 1));
        const int x1 = max(0, min(ix + 1, nGrids.x - 1));
        const int y0 = max(0, min(iy, nGrids.y - 1));
        const int y1 = max(0, min(iy + 1, nGrids.y - 1));
        const int z0 = max(0, min(iz, nGrids.z - 1));
        const int z1 = max(0, min(iz + 1, nGrids.z - 1));
        const float v00 = (1.0 - fx) * vol[c][z0][y0][x0] + fx * vol[c][z0][y0][x1];
        const float v01 = (1.0 - fx) * vol[c][z0][y1][x0] + fx * vol[c][z0][y1][x1];
        const float v10 = (1.0 - fx) * vol[c][z1][y0][x0] + fx * vol[c][z1][y0][x1];
        const float v11 = (1.0 - fx) * vol[c][z1][y1][x0] + fx * vol[c][z1][y1][x1];
        const float v0 = (1.0 - fy) * v00 + fy * v01;
        const float v1 = (1.0 - fy) * v10 + fy * v11;
        output[index][c] = (1.0 - fz) * v0 + fz * v1;
    }
}

torch::Tensor grid_interp_cuda(torch::Tensor vol, torch::Tensor points) {
    // Check input tensors
    CHECK_CUDA(vol);
    CHECK_CONTIGUOUS(vol);
    CHECK_IS_FLOAT(vol);
    CHECK_N_DIM(vol, 4);
    CHECK_CUDA(points);
    CHECK_CONTIGUOUS(points);
    CHECK_IS_FLOAT(vol);
    CHECK_N_DIM(points, 2);

    // Size parameters
    const int Nx = vol.size(3);
    const int Ny = vol.size(2);
    const int Nz = vol.size(1);
    const int C = vol.size(0);
    const int Np = points.size(0);

    torch::Tensor output = torch::zeros({Np, C}, torch::TensorOptions().dtype(torch::kFloat32).device(vol.device()));

    auto vol_ascr = vol.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
    auto pts_ascr = points.packed_accessor32<float, 2, torch::RestrictPtrTraits>();
    auto out_ascr = output.packed_accessor32<float, 2, torch::RestrictPtrTraits>();

    const uint32_t MAX_THREADS_AXIS = 128;
    const uint32_t MAX_THREADS_AXIS2 = MAX_THREADS_AXIS * MAX_THREADS_AXIS;
    const uint32_t blockx = MAX_THREADS_AXIS;
    const uint32_t blocky = MAX_THREADS_AXIS;
    const uint32_t blockz = (Np + MAX_THREADS_AXIS2 - 1) / MAX_THREADS_AXIS2;
    const uint32_t BLOCK_SIZE = 8;
    const uint32_t gridx = (blockx + BLOCK_SIZE - 1) / BLOCK_SIZE;
    const uint32_t gridy = (blocky + BLOCK_SIZE - 1) / BLOCK_SIZE;
    const uint32_t gridz = (blockz + BLOCK_SIZE - 1) / BLOCK_SIZE;

    const int3 nGrids = make_int3(Nx, Ny, Nz);
    const dim3 blocks = { gridx, gridy, gridz };
    const dim3 threads = { BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE };

    const hipStream_t stream = at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
    hipLaunchKernelGGL(( grid_interp_cuda_kernel), dim3(blocks), dim3(threads), 0, stream, vol_ascr, pts_ascr, out_ascr, C, nGrids, Np);
    CUDA_CHECK_ERRORS();

    return output;
}
33f606fa9af66342cecc506f3102cff076925bf0.cu
#include <torch/extension.h>
#include <ATen/ATen.h>
#include <ATen/cuda/CUDAContext.h>
#include <cuda_runtime.h>

#include "macros.h"
#include "cuda_utils.h"

__global__ void grid_interp_cuda_kernel(
    const torch::PackedTensorAccessor32<float, 4, torch::RestrictPtrTraits> vol,
    const torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> points,
    torch::PackedTensorAccessor32<float, 2, torch::RestrictPtrTraits> output,
    int channels, int3 nGrids, size_t size) {
    const int tx = blockIdx.x * blockDim.x + threadIdx.x;
    const int ty = blockIdx.y * blockDim.y + threadIdx.y;
    const int tz = blockIdx.z * blockDim.z + threadIdx.z;
    const int index = (tz * blockDim.y * gridDim.y + ty) * blockDim.x * gridDim.x + tx;
    if (index >= size) {
        return;
    }

    const float x = points[index][0];
    const float y = points[index][1];
    const float z = points[index][2];
    const int ix = (int)x;
    const int iy = (int)y;
    const int iz = (int)z;
    const float fx = x - ix;
    const float fy = y - iy;
    const float fz = z - iz;

    for (int c = 0; c < channels; c++) {
        const int x0 = max(0, min(ix, nGrids.x - 1));
        const int x1 = max(0, min(ix + 1, nGrids.x - 1));
        const int y0 = max(0, min(iy, nGrids.y - 1));
        const int y1 = max(0, min(iy + 1, nGrids.y - 1));
        const int z0 = max(0, min(iz, nGrids.z - 1));
        const int z1 = max(0, min(iz + 1, nGrids.z - 1));
        const float v00 = (1.0 - fx) * vol[c][z0][y0][x0] + fx * vol[c][z0][y0][x1];
        const float v01 = (1.0 - fx) * vol[c][z0][y1][x0] + fx * vol[c][z0][y1][x1];
        const float v10 = (1.0 - fx) * vol[c][z1][y0][x0] + fx * vol[c][z1][y0][x1];
        const float v11 = (1.0 - fx) * vol[c][z1][y1][x0] + fx * vol[c][z1][y1][x1];
        const float v0 = (1.0 - fy) * v00 + fy * v01;
        const float v1 = (1.0 - fy) * v10 + fy * v11;
        output[index][c] = (1.0 - fz) * v0 + fz * v1;
    }
}

torch::Tensor grid_interp_cuda(torch::Tensor vol, torch::Tensor points) {
    // Check input tensors
    CHECK_CUDA(vol);
    CHECK_CONTIGUOUS(vol);
    CHECK_IS_FLOAT(vol);
    CHECK_N_DIM(vol, 4);
    CHECK_CUDA(points);
    CHECK_CONTIGUOUS(points);
    CHECK_IS_FLOAT(vol);
    CHECK_N_DIM(points, 2);

    // Size parameters
    const int Nx = vol.size(3);
    const int Ny = vol.size(2);
    const int Nz = vol.size(1);
    const int C = vol.size(0);
    const int Np = points.size(0);

    torch::Tensor output = torch::zeros({Np, C}, torch::TensorOptions().dtype(torch::kFloat32).device(vol.device()));

    auto vol_ascr = vol.packed_accessor32<float, 4, torch::RestrictPtrTraits>();
    auto pts_ascr = points.packed_accessor32<float, 2, torch::RestrictPtrTraits>();
    auto out_ascr = output.packed_accessor32<float, 2, torch::RestrictPtrTraits>();

    const uint32_t MAX_THREADS_AXIS = 128;
    const uint32_t MAX_THREADS_AXIS2 = MAX_THREADS_AXIS * MAX_THREADS_AXIS;
    const uint32_t blockx = MAX_THREADS_AXIS;
    const uint32_t blocky = MAX_THREADS_AXIS;
    const uint32_t blockz = (Np + MAX_THREADS_AXIS2 - 1) / MAX_THREADS_AXIS2;
    const uint32_t BLOCK_SIZE = 8;
    const uint32_t gridx = (blockx + BLOCK_SIZE - 1) / BLOCK_SIZE;
    const uint32_t gridy = (blocky + BLOCK_SIZE - 1) / BLOCK_SIZE;
    const uint32_t gridz = (blockz + BLOCK_SIZE - 1) / BLOCK_SIZE;

    const int3 nGrids = make_int3(Nx, Ny, Nz);
    const dim3 blocks = { gridx, gridy, gridz };
    const dim3 threads = { BLOCK_SIZE, BLOCK_SIZE, BLOCK_SIZE };

    const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
    grid_interp_cuda_kernel<<<blocks, threads, 0, stream>>>(vol_ascr, pts_ascr, out_ascr, C, nGrids, Np);
    CUDA_CHECK_ERRORS();

    return output;
}
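Besides the launch rewrite, this pair also shows the PyTorch-extension side of the conversion: ATen/cuda/CUDAContext.h becomes ATen/hip/HIPContext.h, cudaStream_t becomes hipStream_t, and at::cuda::getCurrentCUDAStream() becomes at::hip::getCurrentHIPStreamMasqueradingAsCUDA(). The sketch below isolates just that stream plumbing; the wrapper current_torch_stream is the only invented name, everything else appears verbatim in the two files above.

// Hypothetical helper, not part of either corpus file.
#include <ATen/hip/HIPContext.h>   // hipify of <ATen/cuda/CUDAContext.h>
#include <hip/hip_runtime.h>       // hipify of <cuda_runtime.h>

// Returns the stream PyTorch is currently using on the active device. In the
// original .cu file this is:
//     const cudaStream_t stream = at::cuda::getCurrentCUDAStream();
// hipify renames both the stream type and the accessor.
static hipStream_t current_torch_stream()
{
    return at::hip::getCurrentHIPStreamMasqueradingAsCUDA();
}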
bae5839f2888f8a8455c1a1b9213750919b89064.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include "../GpuDelaunay.h" #include<iomanip> #include<iostream> #include "KerCommon.h" #include "KerDivision.h" #include "KerPredicates.h" #include "ThrustWrapper.h" #include "../../Visualizer.h" //// // GpuDel methods //// void GpuDel::cleanup() { thrust_free_all(); _memPool.free(); _pointVec.free(); _constraintVec.free(); _triVec.free(); _oppVec.free(); _triInfoVec.free(); _orgPointIdx.free(); _vertTriVec.free(); _counters.free(); _actConsVec.free(); _orgFlipNum.clear(); _dPredWrapper.cleanup(); __circleCountVec.free(); __rejFlipVec.free(); _numActiveVec.clear(); _numFlipVec.clear(); _numCircleVec.clear(); _timeCheckVec.clear(); _timeFlipVec.clear(); } void GpuDel::compute ( const GDel2DInput& input, GDel2DOutput* output ) { // Set L1 for kernels hipDeviceSetCacheConfig( hipFuncCachePreferL1 ); _input = &input; _output = output; initProfiling(); startTiming( ProfNone ); initForFlip(); splitAndFlip(); outputToHost(); stopTiming( ProfNone, _output->stats.totalTime ); if ( _input->isProfiling( ProfDetail ) ) { std::cout << " FlipCompact time: "; _diagLogCompact.printTime(); std::cout << std::endl; std::cout << " FlipCollect time: "; _diagLogCollect.printTime(); std::cout << std::endl; } cleanup(); return; } void GpuDel::startTiming( ProfLevel level ) { if ( _input->isProfiling( level ) ) _profTimer[ level ].start(); } void GpuDel::pauseTiming( ProfLevel level ) { if ( _input->isProfiling( level ) ) _profTimer[ level ].pause(); } void GpuDel::stopTiming( ProfLevel level, double &accuTime ) { if ( _input->isProfiling( level ) ) { _profTimer[ level ].stop(); accuTime += _profTimer[ level ].value(); } } void GpuDel::restartTiming( ProfLevel level, double &accuTime ) { stopTiming( level, accuTime ); startTiming( level ); } struct CompareX { __device__ bool operator()( const Point2 &a, const Point2 &b ) const { return a._p[0] < b._p[0]; } }; struct Get2Ddist { Point2 _a; RealType abx, aby; Get2Ddist( const Point2 &a, const Point2 &b ) : _a(a) { abx = b._p[0] - a._p[0]; aby = b._p[1] - a._p[1]; } __device__ int operator()( const Point2 &c ) { RealType acx = c._p[0] - _a._p[0]; RealType acy = c._p[1] - _a._p[1]; RealType dist = abx * acy - aby * acx; return __float_as_int( fabs((float) dist) ); } }; RealType orient2dzero( const RealType *pa, const RealType *pb, const RealType *pc ); void GpuDel::constructInitialTriangles() { // First, choose two extreme points along the X axis typedef Point2DVec::iterator Point2DIter; thrust::pair< Point2DIter, Point2DIter > ret = thrust::minmax_element( _pointVec.begin(), _pointVec.end(), CompareX() ); int v0 = ret.first - _pointVec.begin(); int v1 = ret.second - _pointVec.begin(); const Point2 p0 = _pointVec[v0]; const Point2 p1 = _pointVec[v1]; // Find the furthest point from v0v1 IntDVec distVec = _memPool.allocateAny<int>( _pointNum ); distVec.resize( _pointVec.size() ); thrust::transform( _pointVec.begin(), _pointVec.end(), distVec.begin(), Get2Ddist( p0, p1 ) ); const int v2 = thrust::max_element( distVec.begin(), distVec.end() ) - distVec.begin(); const Point2 p2 = _pointVec[v2]; _memPool.release( distVec ); if ( _input->isProfiling( ProfDebug ) ) { std::cout << "Leftmost: " << v0 << " --> " << p0._p[0] << " " << p0._p[1] << std::endl; std::cout << "Rightmost: " << v1 << " --> " << p1._p[0] << " " << p1._p[1] << std::endl; std::cout << "Furthest 2D: " << v2 << " --> " << p2._p[0] << " " << p2._p[1] << std::endl; } // Check to make sure the 4 points are not co-planar 
RealType ori = orient2dzero( p0._p, p1._p, p2._p ); if ( ori == 0.0 ) { std::cout << "Input too degenerate!!!\n" << std::endl; exit(-1); } if ( ortToOrient( ori ) == OrientNeg ) std::swap( v0, v1 ); // Compute the centroid of v0v1v2v3, to be used as the kernel point. _ptInfty._p[0] = ( p0._p[0] + p1._p[0] + p2._p[0] ) / 3.0; _ptInfty._p[1] = ( p0._p[1] + p1._p[1] + p2._p[1] ) / 3.0; // Add the infinity point to the end of the list _infIdx = _pointNum - 1; _pointVec.resize( _pointNum ); _pointVec[ _infIdx ] = _ptInfty; if ( _input->isProfiling( ProfDiag ) ) { std::cout << "Kernel: " << _ptInfty._p[0] << " " << _ptInfty._p[1] << std::endl; } // Initialize the predicate wrapper!!! _dPredWrapper.init( toKernelPtr( _pointVec ), _pointNum, _input->noSort ? NULL : toKernelPtr( _orgPointIdx ), _infIdx ); setPredWrapperConstant( _dPredWrapper ); // Create the initial triangulation Tri firstTri = { v0, v1, v2 }; _triVec.expand( 4 ); _oppVec.expand( 4 ); _triInfoVec.expand( 4 ); // Put the initial tets at the Inf list hipLaunchKernelGGL(( kerMakeFirstTri), dim3(1), dim3(1) , 0, 0, toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), firstTri, _infIdx ); CudaCheckError(); // Locate initial positions of points _vertTriVec.resize( _pointNum ); IntDVec exactCheckVec = _memPool.allocateAny<int>( _pointNum ); _counters.renew(); hipLaunchKernelGGL(( kerInitPointLocationFast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _vertTriVec ), toKernelPtr( exactCheckVec ), _counters.ptr(), firstTri ); hipLaunchKernelGGL(( kerInitPointLocationExact), dim3(PredBlocksPerGrid), dim3(PredThreadsPerBlock) , 0, 0, toKernelPtr( _vertTriVec ), toKernelPtr( exactCheckVec ), _counters.ptr(), firstTri ); CudaCheckError(); _memPool.release( exactCheckVec ); _availPtNum = _pointNum - 4; Visualizer::instance()->addFrame( _pointVec, SegmentDVec(), _triVec, _infIdx ); } void GpuDel::initForFlip() { startTiming( ProfDefault ); _pointNum = _input->pointVec.size() + 1; // Plus the infinity point _triMax = (int) ( _pointNum * 2 ); // Copy points to GPU _pointVec.resize( _pointNum ); // 1 additional slot for the infinity point _pointVec.copyFromHost( _input->pointVec ); // Copy constraints to GPU _constraintVec.copyFromHost( _input->constraintVec ); // Allocate space _triVec.resize( _triMax ); _oppVec.resize( _triMax ); _triInfoVec.resize( _triMax ); _counters.init( CounterNum ); if ( _constraintVec.size() > 0 ) _actConsVec.resize( _constraintVec.size() ); if ( _input->isProfiling( ProfDiag ) ) { __circleCountVec.resize( _triMax ); __rejFlipVec.resize( _triMax ); } // Preallocate some buffers in the pool _memPool.reserve<FlipItem>( _triMax ); // flipVec _memPool.reserve<int2>( _triMax ); // triMsgVec _memPool.reserve<int>( _pointNum ); // vertSphereVec _memPool.reserve<int>( _triMax ); // actTriVec _memPool.reserve<int>( _triMax ); // Two more for common use _memPool.reserve<int>( _triMax ); // if ( _constraintVec.size() > 0 ) _memPool.reserve<int>( _triMax ); // Find the min and max coordinate value typedef thrust::device_ptr< RealType > RealPtr; RealPtr coords( ( RealType* ) toKernelPtr( _pointVec ) ); thrust::pair< RealPtr, RealPtr> ret = thrust::minmax_element( coords, coords + _pointVec.size() * 2 ); _minVal = *ret.first; _maxVal = *ret.second; if ( _input->isProfiling( ProfDebug ) ) { std::cout << "_minVal = " << _minVal << ", _maxVal == " << _maxVal << std::endl; } // Sort points along space curve if ( !_input->noSort ) { stopTiming( ProfDefault, _output->stats.initTime ); startTiming( 
ProfDefault ); IntDVec valueVec = _memPool.allocateAny<int>( _pointNum ); valueVec.resize( _pointVec.size() ); _orgPointIdx.resize( _pointNum ); thrust::sequence( _orgPointIdx.begin(), _orgPointIdx.end(), 0 ); thrust_transform_GetMortonNumber( _pointVec.begin(), _pointVec.end(), valueVec.begin(), _minVal, _maxVal ); thrust_sort_by_key( valueVec.begin(), valueVec.end(), make_zip_iterator( make_tuple( _orgPointIdx.begin(), _pointVec.begin() ) ) ); _memPool.release( valueVec ); stopTiming( ProfDefault, _output->stats.sortTime ); startTiming( ProfDefault ); } // Create first upper-lower triangles constructInitialTriangles(); stopTiming( ProfDefault, _output->stats.initTime ); return; } void GpuDel::doFlippingLoop( CheckDelaunayMode checkMode ) { startTiming( ProfDefault ); _flipVec = _memPool.allocateAny<FlipItem>( _triMax ); _triMsgVec = _memPool.allocateAny<int2>( _triMax ); _actTriVec = _memPool.allocateAny<int>( _triMax ); _triMsgVec.assign( _triMax, make_int2( -1, -1 ) ); int flipLoop = 0; _actTriMode = ActTriMarkCompact; _diagLog = &_diagLogCompact; while ( doFlipping( checkMode ) ) ++flipLoop; stopTiming( ProfDefault, _output->stats.flipTime ); relocateAll(); _memPool.release( _triMsgVec ); _memPool.release( _flipVec ); _memPool.release( _actTriVec ); } void GpuDel::initProfiling() { _output->stats.reset(); _diagLogCompact.reset(); _diagLogCollect.reset(); _numActiveVec.clear(); _numFlipVec.clear(); _timeCheckVec.clear(); _timeFlipVec.clear(); } void GpuDel::initForConstraintInsertion() { if ( !_input->noSort ) { // Update vertex indices of constraints IntDVec mapVec = _memPool.allocateAny<int>( _pointNum ); mapVec.resize( _pointNum ); thrust_scatterSequenceMap( _orgPointIdx, mapVec ); thrust::device_ptr<int> segInt( (int *) toKernelPtr( _constraintVec ) ); thrust::gather( segInt, segInt + _constraintVec.size() * 2, mapVec.begin(), segInt ); _memPool.release( mapVec ); // // Sort the constraints // const int constraintNum = _constraintVec.size(); // IntDVec keyVec = _memPool.allocateAny<int>( constraintNum ); // keyVec.resize( constraintNum ); // thrust::transform( _constraintVec.begin(), _constraintVec.end(), keyVec.begin(), GetConstraintMinVert() ); // thrust::sort_by_key( keyVec.begin(), keyVec.end(), _constraintVec.begin() ); // _memPool.release( keyVec ); } // Construct _vertTriVec.resize( _pointNum ); hipLaunchKernelGGL(( kerMapTriToVert), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _triVec ), toKernelPtr( _vertTriVec ) ); CudaCheckError(); // Initialize list of active constraints thrust::sequence( _actConsVec.begin(), _actConsVec.end() ); } bool GpuDel::markIntersections() { _counters.renew(); hipLaunchKernelGGL(( kerMarkTriConsIntersectionFast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _actConsVec ), toKernelPtr( _constraintVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), toKernelPtr( _vertTriVec ), toKernelPtr( _triConsVec ), _counters.ptr() ); hipLaunchKernelGGL(( kerMarkTriConsIntersectionExact), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _actConsVec ), toKernelPtr( _constraintVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), toKernelPtr( _vertTriVec ), toKernelPtr( _triConsVec ), _counters.ptr() ); CudaCheckError(); return ( _counters[ CounterFlag ] == 1 ); } void GpuDel::updatePairStatus() { IntDVec exactVec = _memPool.allocateAny<int>( _triMax ); _counters.renew(); hipLaunchKernelGGL(( kerUpdatePairStatusFast), dim3(BlocksPerGrid), 
dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _actTriVec ), toKernelPtr( _triConsVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), toKernelPtr( exactVec ), _counters.ptr() ); hipLaunchKernelGGL(( kerUpdatePairStatusExact), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _actTriVec ), toKernelPtr( _triConsVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), toKernelPtr( exactVec ), _counters.ptr() ); CudaCheckError(); _memPool.release( exactVec ); } void GpuDel::checkConsFlipping( IntDVec& triVoteVec ) { IntDVec exactVec = _memPool.allocateAny<int>( _triMax ); _counters.renew(); hipLaunchKernelGGL(( kerCheckConsFlippingFast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _actTriVec ), toKernelPtr( _triConsVec ), toKernelPtr( _triInfoVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( triVoteVec ), toKernelPtr( exactVec ), _counters.ptr() ); hipLaunchKernelGGL(( kerCheckConsFlippingExact), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _actTriVec ), toKernelPtr( _triConsVec ), toKernelPtr( _triInfoVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( triVoteVec ), toKernelPtr( exactVec ), _counters.ptr() ); CudaCheckError(); _memPool.release( exactVec ); } bool GpuDel::doConsFlipping( int &flipNum ) { const int triNum = _triVec.size(); const int actNum = _actTriVec.size(); /////// // Vote for flips /////// #pragma region Diagnostic if ( _input->isProfiling( ProfDiag ) ) __rejFlipVec.assign( triNum, 0 ); #pragma endregion updatePairStatus(); IntDVec triVoteVec = _memPool.allocateAny<int>( _triMax ); triVoteVec.assign( triNum, INT_MAX ); checkConsFlipping( triVoteVec ); //// // Mark rejected flips //// IntDVec flipToTri = _memPool.allocateAny<int>( _triMax ); flipToTri.resize( actNum ); hipLaunchKernelGGL(( kerMarkRejectedConsFlips), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _actTriVec ), toKernelPtr( _triConsVec ), toKernelPtr( triVoteVec ), toKernelPtr( _triInfoVec ), toKernelPtr( _oppVec ), toKernelPtr( flipToTri ), _input->isProfiling( ProfDiag ) ? 
toKernelPtr( __rejFlipVec ) : NULL ); CudaCheckError(); _memPool.release( triVoteVec ); //// // Compact flips //// IntDVec temp = _memPool.allocateAny<int>( _triMax, true ); flipNum = compactIfNegative( flipToTri, temp ); if ( 0 == flipNum ) { _memPool.release( flipToTri ); return false; } //// // Expand flip vector //// int orgFlipNum = _flipVec.size(); int expFlipNum = orgFlipNum + flipNum; if ( expFlipNum > _flipVec.capacity() ) { _flipVec.resize( 0 ); _triMsgVec.assign( _triMax, make_int2( -1, -1 ) ); orgFlipNum = 0; expFlipNum = flipNum; } _flipVec.grow( expFlipNum ); // See doFlipping _triMsgVec.resize( _triVec.size() ); //// // Flipping //// #pragma region Diagnostic if ( _input->isProfiling( ProfDiag ) ) { const int rejFlipNum = thrust_sum( __rejFlipVec ); std::cout << " ConsFlips: " << flipNum << " ( " << rejFlipNum << " )" << std::endl; } #pragma endregion // 32 ThreadsPerBlock is optimal hipLaunchKernelGGL(( kerFlip), dim3(BlocksPerGrid), dim3(32) , 0, 0, toKernelArray( flipToTri ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), NULL, toKernelPtr( _triMsgVec ), NULL, toKernelPtr( _flipVec ), toKernelPtr( _triConsVec ), toKernelPtr( _vertTriVec ), orgFlipNum, 0 ); CudaCheckError(); //// // Update oppTri //// hipLaunchKernelGGL(( kerUpdateOpp), dim3(BlocksPerGrid), dim3(32) , 0, 0, toKernelPtr( _flipVec ) + orgFlipNum, toKernelPtr( _oppVec ), toKernelPtr( _triMsgVec ), toKernelPtr( flipToTri ), orgFlipNum, flipNum ); CudaCheckError(); _memPool.release( flipToTri ); ///////////////////////////////////////////////////////////////////// return true; } void GpuDel::doInsertConstraints() { startTiming( ProfDefault ); initForConstraintInsertion(); const int triNum = _triVec.size(); _triConsVec = _memPool.allocateAny<int>( triNum ); _triConsVec.assign( triNum, -1 ); _flipVec = _memPool.allocateAny<FlipItem>( _triMax ); _triMsgVec = _memPool.allocateAny<int2>( _triMax ); _actTriVec = _memPool.allocateAny<int>( _triMax ); _triMsgVec.assign( _triMax, make_int2( -1, -1 ) ); int outerLoop = 0; int flipLoop = 0; int totFlipNum = 0; int flipNum; while ( markIntersections() ) { if ( _input->isProfiling( ProfDiag ) ) std::cout << "Iter " << ( outerLoop+1 ) << std::endl; // VISUALIZATION if ( Visualizer::instance()->isEnable() ) { pauseTiming( ProfNone ); pauseTiming( ProfDefault ); IntHVec triColorVec; _triConsVec.copyToHost( triColorVec ); for ( int i = 0; i < triColorVec.size(); ++i ) if ( triColorVec[i] != -1 ) triColorVec[i] >>= 4; Visualizer::instance()->addFrame( _pointVec, _constraintVec, _triVec, triColorVec, _infIdx ); startTiming( ProfDefault ); startTiming( ProfNone ); } // Collect active triangles thrust_copyIf_IsNotNegative( _triConsVec, _actTriVec ); int innerLoop = 0; while ( doConsFlipping( flipNum ) ) { totFlipNum += flipNum; // VISUALIZATION if ( Visualizer::instance()->isEnable() ) { pauseTiming( ProfNone ); pauseTiming( ProfDefault ); IntHVec triColorVec; _triConsVec.copyToHost( triColorVec ); for ( int i = 0; i < triColorVec.size(); ++i ) if ( triColorVec[i] != -1 ) triColorVec[i] >>= 4; Visualizer::instance()->addFrame( _pointVec, _constraintVec, _triVec, triColorVec, _infIdx ); startTiming( ProfDefault ); startTiming( ProfNone ); } ++flipLoop; ++innerLoop; if ( innerLoop == 5 ) break; //if ( flipLoop == 1 ) break; } ++outerLoop; // Mark all the possibly modified triangles as Alive + Changed (3). 
thrust_scatterConstantMap( _actTriVec, _triInfoVec, 3 ); //if ( outerLoop == 5 ) break; } //if ( outerLoop >= 20 ) //{ // for ( int i = 0; i < _actTriVec.size(); ++i ) // std::cout << _actTriVec[i] << " "; // std::cout << std::endl; //} if ( _input->isProfiling( ProfDiag ) ) std::cout << "ConsFlip: Outer loop = " << outerLoop << ", inner loop = " << flipLoop << ", total flip = " << totFlipNum << std::endl; _memPool.release( _triConsVec ); _memPool.release( _triMsgVec ); _memPool.release( _actTriVec ); _memPool.release( _flipVec ); stopTiming( ProfDefault, _output->stats.constraintTime ); } void GpuDel::splitAndFlip() { int insLoop = 0; _doFlipping = !_input->insAll; ////////////////// while ( _availPtNum > 0 ) ////////////////// { //////////////////////// splitTri(); //////////////////////// if ( _doFlipping ) doFlippingLoop( CircleFastOrientFast ); ++insLoop; } ////////////////////////////// if ( !_doFlipping ) doFlippingLoop( CircleFastOrientFast ); markSpecialTris(); doFlippingLoop( CircleExactOrientSoS ); ////////////////////////////// // Insert constraints if needed if ( _constraintVec.size() > 0 ) doInsertConstraints(); doFlippingLoop( CircleFastOrientFast ); markSpecialTris(); doFlippingLoop( CircleExactOrientSoS ); #pragma region Diagnostic if ( _input->isProfiling( ProfDiag ) ) { std::cout << "\nInsert loops: " << insLoop << std::endl; std::cout << "Compact: " << std::endl; _diagLogCompact.printCount(); std::cout << "Collect: " << std::endl; _diagLogCollect.printCount(); } #pragma endregion return; } void GpuDel::markSpecialTris() { startTiming( ProfDetail ); hipLaunchKernelGGL(( kerMarkSpecialTris), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _triInfoVec ), toKernelPtr( _oppVec ) ); CudaCheckError(); stopTiming( ProfDetail, _diagLog->_t[ 0 ] ); } void GpuDel::expandTri( int newTriNum ) { //*** Expand triangles _triVec.expand( newTriNum ); _oppVec.expand( newTriNum ); _triInfoVec.expand( newTriNum ); } void GpuDel::splitTri() { const int MaxSamplePerTri = 100; startTiming( ProfDefault ); //// // Rank points //// int triNum = _triVec.size(); int noSample = _pointNum; if ( noSample / triNum > MaxSamplePerTri ) noSample = triNum * MaxSamplePerTri; IntDVec triCircleVec = _memPool.allocateAny<int>( _triMax ); triCircleVec.assign( triNum, INT_MIN ); IntDVec vertCircleVec = _memPool.allocateAny<int>( _pointNum ); vertCircleVec.resize( noSample ); hipLaunchKernelGGL(( kerVoteForPoint), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _vertTriVec ), toKernelPtr( _triVec ), toKernelPtr( vertCircleVec ), toKernelPtr( triCircleVec ), noSample ); CudaCheckError(); IntDVec triToVert = _memPool.allocateAny<int>( _triMax ); triToVert.assign( triNum, INT_MAX ); hipLaunchKernelGGL(( kerPickWinnerPoint), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _vertTriVec ), toKernelPtr( vertCircleVec ), toKernelPtr( triCircleVec ), toKernelPtr( triToVert ), noSample ); CudaCheckError(); _memPool.release( vertCircleVec ); _memPool.release( triCircleVec ); //// // Collect triangles with insertions //// IntDVec splitTriVec = _memPool.allocateAny<int>( _pointNum ); _insNum = thrust_copyIf_TriHasVert( triToVert, splitTriVec ); const int extraTriNum = DIM * _insNum; const int splitTriNum = triNum + extraTriNum; if ( _input->isProfiling( ProfDiag ) ) { std::cout << "Insert: " << _insNum << " Tri from: " << triNum << " to: " << splitTriNum << std::endl; } // If there's just a few points if ( _availPtNum - _insNum < _insNum && _insNum < 0.1 * _pointNum ) { 
_doFlipping = false; //std::cout << "Stop flipping!" << std::endl; } if ( !_input->noReorder && _doFlipping ) { stopTiming( ProfDefault, _output->stats.splitTime ); shiftTri( triToVert, splitTriVec ); triNum = -1; // Mark that we have shifted the array startTiming( ProfDefault ); } //// // Make map //// IntDVec insTriMap = _memPool.allocateAny<int>( _triMax ); insTriMap.assign( ( triNum < 0 ) ? splitTriNum : triNum, -1 ); thrust_scatterSequenceMap( splitTriVec, insTriMap ); //// // Expand if space needed //// expandTri( splitTriNum ); //// // Update the location of the points //// stopTiming( ProfDefault, _output->stats.splitTime ); startTiming( ProfDefault ); IntDVec exactCheckVec = _memPool.allocateAny<int>( _pointNum ); _counters.renew(); hipLaunchKernelGGL(( kerSplitPointsFast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _vertTriVec ), toKernelPtr( triToVert ), toKernelPtr( _triVec ), toKernelPtr( insTriMap ), toKernelPtr( exactCheckVec ), _counters.ptr(), triNum, _insNum ); hipLaunchKernelGGL(( kerSplitPointsExactSoS), dim3(PredBlocksPerGrid), dim3(PredThreadsPerBlock) , 0, 0, toKernelPtr( _vertTriVec ), toKernelPtr( triToVert ), toKernelPtr( _triVec ), toKernelPtr( insTriMap ), toKernelPtr( exactCheckVec ), _counters.ptr(), triNum, _insNum ); CudaCheckError(); _memPool.release( exactCheckVec ); stopTiming( ProfDefault, _output->stats.relocateTime ); startTiming( ProfDefault ); //// // Split old into new triangle and copy them to new array //// hipLaunchKernelGGL(( kerSplitTri), dim3(BlocksPerGrid), dim3(32) , 0, 0, toKernelArray( splitTriVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), toKernelPtr( insTriMap ), toKernelPtr( triToVert ), triNum, _insNum ); CudaCheckError(); _memPool.release( triToVert ); _memPool.release( insTriMap ); _memPool.release( splitTriVec ); _availPtNum -= _insNum; stopTiming( ProfDefault, _output->stats.splitTime ); Visualizer::instance()->addFrame( _pointVec, SegmentDVec(), _triVec, _infIdx ); return; } bool GpuDel::doFlipping( CheckDelaunayMode checkMode ) { startTiming( ProfDetail ); ++_diagLog->_flipLoop; const int triNum = _triVec.size(); //// // Compact active triangles //// switch ( _actTriMode ) { case ActTriMarkCompact: thrust_copyIf_IsActiveTri( _triInfoVec, _actTriVec ); break; case ActTriCollectCompact: IntDVec temp = _memPool.allocateAny<int>( _triMax, true ); compactIfNegative( _actTriVec, temp ); break; } int orgActNum = _actTriVec.size(); #pragma region Diagnostic if ( _input->isProfiling( ProfDiag ) ) { _numActiveVec.push_back( orgActNum ); if ( orgActNum == 0 || ( checkMode != CircleExactOrientSoS && orgActNum < PredBlocksPerGrid * PredThreadsPerBlock ) ) { _numFlipVec.push_back( 0 ); _timeCheckVec.push_back( 0.0 ); _timeFlipVec.push_back( 0.0 ); _numCircleVec.push_back( 0 ); } } #pragma endregion restartTiming( ProfDetail, _diagLog->_t[ 0 ] ); ///////////////////////////////////////////////////////////////////// //// // Check actNum, switch mode or quit if necessary //// // No more work if ( 0 == orgActNum ) return false; // Little work, leave it for the Exact iterations if ( checkMode != CircleExactOrientSoS && orgActNum < PredBlocksPerGrid * PredThreadsPerBlock ) return false; // See if there's little work enough to switch to collect mode. 
// Safety check: make sure there's enough space to collect if ( orgActNum < BlocksPerGrid * ThreadsPerBlock && orgActNum * 2 < _actTriVec.capacity() && orgActNum * 2 < triNum ) { _actTriMode = ActTriCollectCompact; _diagLog = &_diagLogCollect; } else { _actTriMode = ActTriMarkCompact; _diagLog = &_diagLogCompact; } //// // Vote for flips //// #pragma region Diagnostic if ( _input->isProfiling( ProfDiag ) ) { __circleCountVec.assign( triNum, 0 ); __rejFlipVec.assign( triNum, 0 ); } #pragma endregion IntDVec triVoteVec = _memPool.allocateAny<int>( _triMax ); triVoteVec.assign( triNum, INT_MAX ); dispatchCheckDelaunay( checkMode, orgActNum, triVoteVec ); double prevTime = _diagLog->_t[ 1 ]; restartTiming( ProfDetail, _diagLog->_t[ 1 ] ); ///////////////////////////////////////////////////////////////////// //// // Mark rejected flips //// IntDVec flipToTri = _memPool.allocateAny<int>( _triMax ); flipToTri.resize( orgActNum ); hipLaunchKernelGGL(( kerMarkRejectedFlips), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelPtr( _actTriVec ), toKernelPtr( _oppVec ), toKernelPtr( triVoteVec ), toKernelPtr( _triInfoVec ), toKernelPtr( flipToTri ), orgActNum, _input->isProfiling( ProfDiag ) ? toKernelPtr( __rejFlipVec ) : NULL ); CudaCheckError(); _memPool.release( triVoteVec ); restartTiming( ProfDetail, _diagLog->_t[ 2 ] ); ///////////////////////////////////////////////////////////////////// //// // Compact flips //// IntDVec temp = _memPool.allocateAny<int>( _triMax, true ); const int flipNum = compactIfNegative( flipToTri, temp ); if ( _input->isProfiling( ProfDiag ) ) { _numFlipVec.push_back( flipNum ); _timeCheckVec.push_back( _diagLog->_t[ 1 ] - prevTime ); } restartTiming( ProfDetail, _diagLog->_t[ 3 ] ); ///////////////////////////////////////////////////////////////////// //// // Preparation for the actual flipping. Include several steps //// #pragma region Diagnostic if ( _input->isProfiling( ProfDiag ) ) { const int circleNum = thrust_sum( __circleCountVec ); _diagLog->_circleCount += circleNum; const int rejFlipNum = thrust_sum( __rejFlipVec ); _diagLog->_rejFlipCount += rejFlipNum; _diagLog->_totFlipNum += flipNum; std::cout << "Acts: " << orgActNum << " Flips: " << flipNum << " ( " << rejFlipNum << " )" << " circle: " << circleNum << " Exact: " << ( checkMode == CircleExactOrientSoS ? _counters[ CounterExact ] : -1 ) << std::endl; _numCircleVec.push_back( circleNum ); startTiming( ProfDetail ); } #pragma endregion if ( 0 == flipNum ) { _numCircleVec.push_back( 0 ); _timeFlipVec.push_back( 0 ); _memPool.release( flipToTri ); return false; } // Expand flip vector int orgFlipNum = _flipVec.size(); int expFlipNum = orgFlipNum + flipNum; if ( expFlipNum > _flipVec.capacity() ) { stopTiming( ProfDetail, _diagLog->_t[ 4 ] ); stopTiming( ProfDefault, _output->stats.flipTime ); relocateAll(); startTiming( ProfDefault ); startTiming( ProfDetail ); orgFlipNum = 0; expFlipNum = flipNum; } _flipVec.grow( expFlipNum ); // _triMsgVec contains two components. // - .x is the encoded new neighbor information // - .y is the flipIdx as in the flipVec (i.e. globIdx) // As such, we do not need to initialize it to -1 to // know which tris are not flipped in the current rount. // We can rely on the flipIdx being > or < than orgFlipIdx. // Note that we have to initialize everything to -1 // when we clear the flipVec and reset the flip indexing. 
// _triMsgVec.resize( _triVec.size() ); //// // Expand active tri vector //// if ( _actTriMode == ActTriCollectCompact ) _actTriVec.grow( orgActNum + flipNum ); restartTiming( ProfDetail, _diagLog->_t[ 4 ] ); ///////////////////////////////////////////////////////////////////// //// // Flipping //// // 32 ThreadsPerBlock is optimal hipLaunchKernelGGL(( kerFlip), dim3(BlocksPerGrid), dim3(32) , 0, 0, toKernelArray( flipToTri ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), toKernelPtr( _triMsgVec ), ( _actTriMode == ActTriCollectCompact ) ? toKernelPtr( _actTriVec ) : NULL, toKernelPtr( _flipVec ), NULL, NULL, orgFlipNum, orgActNum ); CudaCheckError(); _orgFlipNum.push_back( orgFlipNum ); //// // Update oppTri //// hipLaunchKernelGGL(( kerUpdateOpp), dim3(BlocksPerGrid), dim3(32) , 0, 0, toKernelPtr( _flipVec ) + orgFlipNum, toKernelPtr( _oppVec ), toKernelPtr( _triMsgVec ), toKernelPtr( flipToTri ), orgFlipNum, flipNum ); CudaCheckError(); _memPool.release( flipToTri ); prevTime = _diagLog->_t[ 5 ]; stopTiming( ProfDetail, _diagLog->_t[ 5 ] ); if ( _input->isProfiling( ProfDiag ) ) _timeFlipVec.push_back( _diagLog->_t[ 5 ] - prevTime ); ///////////////////////////////////////////////////////////////////// Visualizer::instance()->addFrame( _pointVec, SegmentDVec(), _triVec, _infIdx ); return true; } void GpuDel::dispatchCheckDelaunay ( CheckDelaunayMode checkMode, int orgActNum, IntDVec& triVoteVec ) { switch ( checkMode ) { case CircleFastOrientFast: hipLaunchKernelGGL(( kerCheckDelaunayFast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelPtr( _actTriVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), toKernelPtr( triVoteVec ), orgActNum, _input->isProfiling( ProfDiag ) ? toKernelPtr( __circleCountVec ) : NULL ); CudaCheckError(); break; case CircleExactOrientSoS: // Reuse this array to save memory Int2DVec &exactCheckVi = _triMsgVec; _counters.renew(); hipLaunchKernelGGL(( kerCheckDelaunayExact_Fast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelPtr( _actTriVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), toKernelPtr( triVoteVec ), toKernelPtr( exactCheckVi ), orgActNum, _counters.ptr(), _input->isProfiling( ProfDiag ) ? toKernelPtr( __circleCountVec ) : NULL ); hipLaunchKernelGGL(( kerCheckDelaunayExact_Exact), dim3(PredBlocksPerGrid), dim3(PredThreadsPerBlock) , 0, 0, toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( triVoteVec ), toKernelPtr( exactCheckVi ), _counters.ptr(), _input->isProfiling( ProfDiag ) ? 
toKernelPtr( __circleCountVec ) : NULL ); CudaCheckError(); break; } } template< typename T > __global__ void kerShift ( KerIntArray shiftVec, T* src, T* dest ) { for ( int idx = getCurThreadIdx(); idx < shiftVec._num; idx += getThreadNum() ) { const int shift = shiftVec._arr[ idx ]; dest[ idx + shift ] = src[ idx ]; } } template< typename T > void GpuDel::shiftExpandVec( IntDVec &shiftVec, DevVector< T > &dataVec, int size ) { DevVector< T > tempVec = _memPool.allocateAny<T>( size ); tempVec.resize( size ); hipLaunchKernelGGL(( kerShift), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( shiftVec ), toKernelPtr( dataVec ), toKernelPtr( tempVec ) ); CudaCheckError(); dataVec.copyFrom( tempVec ); _memPool.release( tempVec ); } void GpuDel::shiftOppVec( IntDVec &shiftVec, TriOppDVec &dataVec, int size ) { TriOppDVec tempVec = _memPool.allocateAny< TriOpp >( size ); tempVec.resize( size ); hipLaunchKernelGGL(( kerShiftOpp), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( shiftVec ), toKernelPtr( dataVec ), toKernelPtr( tempVec ), size ); CudaCheckError(); dataVec.copyFrom( tempVec ); _memPool.release( tempVec ); } void GpuDel::shiftTri( IntDVec &triToVert, IntDVec &splitTriVec ) { startTiming( ProfDefault ); const int triNum = _triVec.size() + 2 * splitTriVec.size(); IntDVec shiftVec = _memPool.allocateAny<int>( _triMax ); thrust_scan_TriHasVert( triToVert, shiftVec ); shiftExpandVec( shiftVec, _triVec, triNum ); shiftExpandVec( shiftVec, _triInfoVec, triNum ); shiftExpandVec( shiftVec, triToVert, triNum ); shiftOppVec( shiftVec, _oppVec, triNum ); hipLaunchKernelGGL(( kerShiftTriIdx), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _vertTriVec ), toKernelPtr( shiftVec ) ); CudaCheckError(); hipLaunchKernelGGL(( kerShiftTriIdx), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( splitTriVec ), toKernelPtr( shiftVec ) ); CudaCheckError(); _memPool.release( shiftVec ); stopTiming( ProfDefault, _output->stats.sortTime ); } void GpuDel::relocateAll() { if ( _flipVec.size() == 0 ) return ; startTiming( ProfDefault ); if ( _availPtNum > 0 ) { const int triNum = _triVec.size(); IntDVec triToFlip = _memPool.allocateAny<int>( _triMax ); triToFlip.assign( triNum, -1 ); // Rebuild the pointers from back to forth int nextFlipNum = _flipVec.size(); for ( int i = _orgFlipNum.size() - 1; i >= 0; --i ) { int prevFlipNum = _orgFlipNum[ i ]; int flipNum = nextFlipNum - prevFlipNum; hipLaunchKernelGGL(( kerUpdateFlipTrace), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelPtr( _flipVec ), toKernelPtr( triToFlip ), prevFlipNum, flipNum ); nextFlipNum = prevFlipNum; } CudaCheckError(); // Relocate points IntDVec exactCheckVec = _memPool.allocateAny<int>( _pointNum ); _counters.renew(); hipLaunchKernelGGL(( kerRelocatePointsFast), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _vertTriVec ), toKernelPtr( triToFlip ), toKernelPtr( _flipVec ), toKernelPtr( exactCheckVec ), _counters.ptr() ); hipLaunchKernelGGL(( kerRelocatePointsExact), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelPtr( _vertTriVec ), toKernelPtr( triToFlip ), toKernelPtr( _flipVec ), toKernelPtr( exactCheckVec ), _counters.ptr() ); CudaCheckError(); _memPool.release( exactCheckVec ); _memPool.release( triToFlip ); } // Just clean up the flips _flipVec.resize( 0 ); _orgFlipNum.clear(); // Reset the triMsgVec _triMsgVec.assign( _triMax, make_int2( -1, -1 ) ); stopTiming( ProfDefault, _output->stats.relocateTime ); } void GpuDel::compactTris() 
{ const int triNum = _triVec.size(); IntDVec prefixVec = _memPool.allocateAny<int>( _triMax ); prefixVec.resize( triNum ); thrust_scan_TriAliveStencil( _triInfoVec, prefixVec ); int newTriNum = prefixVec[ triNum - 1 ]; int freeNum = triNum - newTriNum; IntDVec freeVec = _memPool.allocateAny<int>( _triMax ); freeVec.resize( freeNum ); hipLaunchKernelGGL(( kerCollectFreeSlots), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelPtr( _triInfoVec ), toKernelPtr( prefixVec ), toKernelPtr( freeVec ), newTriNum ); CudaCheckError(); // Make map hipLaunchKernelGGL(( kerMakeCompactMap), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _triInfoVec ), toKernelPtr( prefixVec ), toKernelPtr( freeVec ), newTriNum ); CudaCheckError(); // Reorder the tets hipLaunchKernelGGL(( kerCompactTris), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _triInfoVec ), toKernelPtr( prefixVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), newTriNum ); CudaCheckError(); _triInfoVec.resize( newTriNum ); _triVec.resize( newTriNum ); _oppVec.resize( newTriNum ); _memPool.release( freeVec ); _memPool.release( prefixVec ); } void GpuDel::outputToHost() { startTiming( ProfDefault ); hipLaunchKernelGGL(( kerMarkInfinityTri), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _triVec ), toKernelPtr( _triInfoVec ), toKernelPtr( _oppVec ), _infIdx ); CudaCheckError(); compactTris(); if ( !_input->noSort ) { // Change the indices back to the original order hipLaunchKernelGGL(( kerUpdateVertIdx), dim3(BlocksPerGrid), dim3(ThreadsPerBlock) , 0, 0, toKernelArray( _triVec ), toKernelPtr( _triInfoVec ), toKernelPtr( _orgPointIdx ) ); CudaCheckError(); } //// // Copy to host _triVec.copyToHost( _output->triVec ); _oppVec.copyToHost( _output->triOppVec ); // Output Infty point _output->ptInfty = _ptInfty; stopTiming( ProfDefault, _output->stats.outTime ); //// std::cout << "# Triangles: " << _triVec.size() << std::endl; return; }
bae5839f2888f8a8455c1a1b9213750919b89064.cu
#include "../GpuDelaunay.h" #include<iomanip> #include<iostream> #include "KerCommon.h" #include "KerDivision.h" #include "KerPredicates.h" #include "ThrustWrapper.h" #include "../../Visualizer.h" //// // GpuDel methods //// void GpuDel::cleanup() { thrust_free_all(); _memPool.free(); _pointVec.free(); _constraintVec.free(); _triVec.free(); _oppVec.free(); _triInfoVec.free(); _orgPointIdx.free(); _vertTriVec.free(); _counters.free(); _actConsVec.free(); _orgFlipNum.clear(); _dPredWrapper.cleanup(); __circleCountVec.free(); __rejFlipVec.free(); _numActiveVec.clear(); _numFlipVec.clear(); _numCircleVec.clear(); _timeCheckVec.clear(); _timeFlipVec.clear(); } void GpuDel::compute ( const GDel2DInput& input, GDel2DOutput* output ) { // Set L1 for kernels cudaDeviceSetCacheConfig( cudaFuncCachePreferL1 ); _input = &input; _output = output; initProfiling(); startTiming( ProfNone ); initForFlip(); splitAndFlip(); outputToHost(); stopTiming( ProfNone, _output->stats.totalTime ); if ( _input->isProfiling( ProfDetail ) ) { std::cout << " FlipCompact time: "; _diagLogCompact.printTime(); std::cout << std::endl; std::cout << " FlipCollect time: "; _diagLogCollect.printTime(); std::cout << std::endl; } cleanup(); return; } void GpuDel::startTiming( ProfLevel level ) { if ( _input->isProfiling( level ) ) _profTimer[ level ].start(); } void GpuDel::pauseTiming( ProfLevel level ) { if ( _input->isProfiling( level ) ) _profTimer[ level ].pause(); } void GpuDel::stopTiming( ProfLevel level, double &accuTime ) { if ( _input->isProfiling( level ) ) { _profTimer[ level ].stop(); accuTime += _profTimer[ level ].value(); } } void GpuDel::restartTiming( ProfLevel level, double &accuTime ) { stopTiming( level, accuTime ); startTiming( level ); } struct CompareX { __device__ bool operator()( const Point2 &a, const Point2 &b ) const { return a._p[0] < b._p[0]; } }; struct Get2Ddist { Point2 _a; RealType abx, aby; Get2Ddist( const Point2 &a, const Point2 &b ) : _a(a) { abx = b._p[0] - a._p[0]; aby = b._p[1] - a._p[1]; } __device__ int operator()( const Point2 &c ) { RealType acx = c._p[0] - _a._p[0]; RealType acy = c._p[1] - _a._p[1]; RealType dist = abx * acy - aby * acx; return __float_as_int( fabs((float) dist) ); } }; RealType orient2dzero( const RealType *pa, const RealType *pb, const RealType *pc ); void GpuDel::constructInitialTriangles() { // First, choose two extreme points along the X axis typedef Point2DVec::iterator Point2DIter; thrust::pair< Point2DIter, Point2DIter > ret = thrust::minmax_element( _pointVec.begin(), _pointVec.end(), CompareX() ); int v0 = ret.first - _pointVec.begin(); int v1 = ret.second - _pointVec.begin(); const Point2 p0 = _pointVec[v0]; const Point2 p1 = _pointVec[v1]; // Find the furthest point from v0v1 IntDVec distVec = _memPool.allocateAny<int>( _pointNum ); distVec.resize( _pointVec.size() ); thrust::transform( _pointVec.begin(), _pointVec.end(), distVec.begin(), Get2Ddist( p0, p1 ) ); const int v2 = thrust::max_element( distVec.begin(), distVec.end() ) - distVec.begin(); const Point2 p2 = _pointVec[v2]; _memPool.release( distVec ); if ( _input->isProfiling( ProfDebug ) ) { std::cout << "Leftmost: " << v0 << " --> " << p0._p[0] << " " << p0._p[1] << std::endl; std::cout << "Rightmost: " << v1 << " --> " << p1._p[0] << " " << p1._p[1] << std::endl; std::cout << "Furthest 2D: " << v2 << " --> " << p2._p[0] << " " << p2._p[1] << std::endl; } // Check to make sure the 4 points are not co-planar RealType ori = orient2dzero( p0._p, p1._p, p2._p ); if ( ori == 0.0 ) { std::cout << 
"Input too degenerate!!!\n" << std::endl; exit(-1); } if ( ortToOrient( ori ) == OrientNeg ) std::swap( v0, v1 ); // Compute the centroid of v0v1v2v3, to be used as the kernel point. _ptInfty._p[0] = ( p0._p[0] + p1._p[0] + p2._p[0] ) / 3.0; _ptInfty._p[1] = ( p0._p[1] + p1._p[1] + p2._p[1] ) / 3.0; // Add the infinity point to the end of the list _infIdx = _pointNum - 1; _pointVec.resize( _pointNum ); _pointVec[ _infIdx ] = _ptInfty; if ( _input->isProfiling( ProfDiag ) ) { std::cout << "Kernel: " << _ptInfty._p[0] << " " << _ptInfty._p[1] << std::endl; } // Initialize the predicate wrapper!!! _dPredWrapper.init( toKernelPtr( _pointVec ), _pointNum, _input->noSort ? NULL : toKernelPtr( _orgPointIdx ), _infIdx ); setPredWrapperConstant( _dPredWrapper ); // Create the initial triangulation Tri firstTri = { v0, v1, v2 }; _triVec.expand( 4 ); _oppVec.expand( 4 ); _triInfoVec.expand( 4 ); // Put the initial tets at the Inf list kerMakeFirstTri<<< 1, 1 >>>( toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), firstTri, _infIdx ); CudaCheckError(); // Locate initial positions of points _vertTriVec.resize( _pointNum ); IntDVec exactCheckVec = _memPool.allocateAny<int>( _pointNum ); _counters.renew(); kerInitPointLocationFast<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _vertTriVec ), toKernelPtr( exactCheckVec ), _counters.ptr(), firstTri ); kerInitPointLocationExact<<< PredBlocksPerGrid, PredThreadsPerBlock >>>( toKernelPtr( _vertTriVec ), toKernelPtr( exactCheckVec ), _counters.ptr(), firstTri ); CudaCheckError(); _memPool.release( exactCheckVec ); _availPtNum = _pointNum - 4; Visualizer::instance()->addFrame( _pointVec, SegmentDVec(), _triVec, _infIdx ); } void GpuDel::initForFlip() { startTiming( ProfDefault ); _pointNum = _input->pointVec.size() + 1; // Plus the infinity point _triMax = (int) ( _pointNum * 2 ); // Copy points to GPU _pointVec.resize( _pointNum ); // 1 additional slot for the infinity point _pointVec.copyFromHost( _input->pointVec ); // Copy constraints to GPU _constraintVec.copyFromHost( _input->constraintVec ); // Allocate space _triVec.resize( _triMax ); _oppVec.resize( _triMax ); _triInfoVec.resize( _triMax ); _counters.init( CounterNum ); if ( _constraintVec.size() > 0 ) _actConsVec.resize( _constraintVec.size() ); if ( _input->isProfiling( ProfDiag ) ) { __circleCountVec.resize( _triMax ); __rejFlipVec.resize( _triMax ); } // Preallocate some buffers in the pool _memPool.reserve<FlipItem>( _triMax ); // flipVec _memPool.reserve<int2>( _triMax ); // triMsgVec _memPool.reserve<int>( _pointNum ); // vertSphereVec _memPool.reserve<int>( _triMax ); // actTriVec _memPool.reserve<int>( _triMax ); // Two more for common use _memPool.reserve<int>( _triMax ); // if ( _constraintVec.size() > 0 ) _memPool.reserve<int>( _triMax ); // Find the min and max coordinate value typedef thrust::device_ptr< RealType > RealPtr; RealPtr coords( ( RealType* ) toKernelPtr( _pointVec ) ); thrust::pair< RealPtr, RealPtr> ret = thrust::minmax_element( coords, coords + _pointVec.size() * 2 ); _minVal = *ret.first; _maxVal = *ret.second; if ( _input->isProfiling( ProfDebug ) ) { std::cout << "_minVal = " << _minVal << ", _maxVal == " << _maxVal << std::endl; } // Sort points along space curve if ( !_input->noSort ) { stopTiming( ProfDefault, _output->stats.initTime ); startTiming( ProfDefault ); IntDVec valueVec = _memPool.allocateAny<int>( _pointNum ); valueVec.resize( _pointVec.size() ); _orgPointIdx.resize( _pointNum ); thrust::sequence( _orgPointIdx.begin(), 
_orgPointIdx.end(), 0 ); thrust_transform_GetMortonNumber( _pointVec.begin(), _pointVec.end(), valueVec.begin(), _minVal, _maxVal ); thrust_sort_by_key( valueVec.begin(), valueVec.end(), make_zip_iterator( make_tuple( _orgPointIdx.begin(), _pointVec.begin() ) ) ); _memPool.release( valueVec ); stopTiming( ProfDefault, _output->stats.sortTime ); startTiming( ProfDefault ); } // Create first upper-lower triangles constructInitialTriangles(); stopTiming( ProfDefault, _output->stats.initTime ); return; } void GpuDel::doFlippingLoop( CheckDelaunayMode checkMode ) { startTiming( ProfDefault ); _flipVec = _memPool.allocateAny<FlipItem>( _triMax ); _triMsgVec = _memPool.allocateAny<int2>( _triMax ); _actTriVec = _memPool.allocateAny<int>( _triMax ); _triMsgVec.assign( _triMax, make_int2( -1, -1 ) ); int flipLoop = 0; _actTriMode = ActTriMarkCompact; _diagLog = &_diagLogCompact; while ( doFlipping( checkMode ) ) ++flipLoop; stopTiming( ProfDefault, _output->stats.flipTime ); relocateAll(); _memPool.release( _triMsgVec ); _memPool.release( _flipVec ); _memPool.release( _actTriVec ); } void GpuDel::initProfiling() { _output->stats.reset(); _diagLogCompact.reset(); _diagLogCollect.reset(); _numActiveVec.clear(); _numFlipVec.clear(); _timeCheckVec.clear(); _timeFlipVec.clear(); } void GpuDel::initForConstraintInsertion() { if ( !_input->noSort ) { // Update vertex indices of constraints IntDVec mapVec = _memPool.allocateAny<int>( _pointNum ); mapVec.resize( _pointNum ); thrust_scatterSequenceMap( _orgPointIdx, mapVec ); thrust::device_ptr<int> segInt( (int *) toKernelPtr( _constraintVec ) ); thrust::gather( segInt, segInt + _constraintVec.size() * 2, mapVec.begin(), segInt ); _memPool.release( mapVec ); // // Sort the constraints // const int constraintNum = _constraintVec.size(); // IntDVec keyVec = _memPool.allocateAny<int>( constraintNum ); // keyVec.resize( constraintNum ); // thrust::transform( _constraintVec.begin(), _constraintVec.end(), keyVec.begin(), GetConstraintMinVert() ); // thrust::sort_by_key( keyVec.begin(), keyVec.end(), _constraintVec.begin() ); // _memPool.release( keyVec ); } // Construct _vertTriVec.resize( _pointNum ); kerMapTriToVert<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _triVec ), toKernelPtr( _vertTriVec ) ); CudaCheckError(); // Initialize list of active constraints thrust::sequence( _actConsVec.begin(), _actConsVec.end() ); } bool GpuDel::markIntersections() { _counters.renew(); kerMarkTriConsIntersectionFast<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _actConsVec ), toKernelPtr( _constraintVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), toKernelPtr( _vertTriVec ), toKernelPtr( _triConsVec ), _counters.ptr() ); kerMarkTriConsIntersectionExact<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _actConsVec ), toKernelPtr( _constraintVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), toKernelPtr( _vertTriVec ), toKernelPtr( _triConsVec ), _counters.ptr() ); CudaCheckError(); return ( _counters[ CounterFlag ] == 1 ); } void GpuDel::updatePairStatus() { IntDVec exactVec = _memPool.allocateAny<int>( _triMax ); _counters.renew(); kerUpdatePairStatusFast<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _actTriVec ), toKernelPtr( _triConsVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), toKernelPtr( exactVec ), _counters.ptr() ); kerUpdatePairStatusExact<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _actTriVec ), toKernelPtr( _triConsVec ), 
toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), toKernelPtr( exactVec ), _counters.ptr() ); CudaCheckError(); _memPool.release( exactVec ); } void GpuDel::checkConsFlipping( IntDVec& triVoteVec ) { IntDVec exactVec = _memPool.allocateAny<int>( _triMax ); _counters.renew(); kerCheckConsFlippingFast<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _actTriVec ), toKernelPtr( _triConsVec ), toKernelPtr( _triInfoVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( triVoteVec ), toKernelPtr( exactVec ), _counters.ptr() ); kerCheckConsFlippingExact<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _actTriVec ), toKernelPtr( _triConsVec ), toKernelPtr( _triInfoVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( triVoteVec ), toKernelPtr( exactVec ), _counters.ptr() ); CudaCheckError(); _memPool.release( exactVec ); } bool GpuDel::doConsFlipping( int &flipNum ) { const int triNum = _triVec.size(); const int actNum = _actTriVec.size(); /////// // Vote for flips /////// #pragma region Diagnostic if ( _input->isProfiling( ProfDiag ) ) __rejFlipVec.assign( triNum, 0 ); #pragma endregion updatePairStatus(); IntDVec triVoteVec = _memPool.allocateAny<int>( _triMax ); triVoteVec.assign( triNum, INT_MAX ); checkConsFlipping( triVoteVec ); //// // Mark rejected flips //// IntDVec flipToTri = _memPool.allocateAny<int>( _triMax ); flipToTri.resize( actNum ); kerMarkRejectedConsFlips<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _actTriVec ), toKernelPtr( _triConsVec ), toKernelPtr( triVoteVec ), toKernelPtr( _triInfoVec ), toKernelPtr( _oppVec ), toKernelPtr( flipToTri ), _input->isProfiling( ProfDiag ) ? toKernelPtr( __rejFlipVec ) : NULL ); CudaCheckError(); _memPool.release( triVoteVec ); //// // Compact flips //// IntDVec temp = _memPool.allocateAny<int>( _triMax, true ); flipNum = compactIfNegative( flipToTri, temp ); if ( 0 == flipNum ) { _memPool.release( flipToTri ); return false; } //// // Expand flip vector //// int orgFlipNum = _flipVec.size(); int expFlipNum = orgFlipNum + flipNum; if ( expFlipNum > _flipVec.capacity() ) { _flipVec.resize( 0 ); _triMsgVec.assign( _triMax, make_int2( -1, -1 ) ); orgFlipNum = 0; expFlipNum = flipNum; } _flipVec.grow( expFlipNum ); // See doFlipping _triMsgVec.resize( _triVec.size() ); //// // Flipping //// #pragma region Diagnostic if ( _input->isProfiling( ProfDiag ) ) { const int rejFlipNum = thrust_sum( __rejFlipVec ); std::cout << " ConsFlips: " << flipNum << " ( " << rejFlipNum << " )" << std::endl; } #pragma endregion // 32 ThreadsPerBlock is optimal kerFlip<<< BlocksPerGrid, 32 >>>( toKernelArray( flipToTri ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), NULL, toKernelPtr( _triMsgVec ), NULL, toKernelPtr( _flipVec ), toKernelPtr( _triConsVec ), toKernelPtr( _vertTriVec ), orgFlipNum, 0 ); CudaCheckError(); //// // Update oppTri //// kerUpdateOpp<<< BlocksPerGrid, 32 >>>( toKernelPtr( _flipVec ) + orgFlipNum, toKernelPtr( _oppVec ), toKernelPtr( _triMsgVec ), toKernelPtr( flipToTri ), orgFlipNum, flipNum ); CudaCheckError(); _memPool.release( flipToTri ); ///////////////////////////////////////////////////////////////////// return true; } void GpuDel::doInsertConstraints() { startTiming( ProfDefault ); initForConstraintInsertion(); const int triNum = _triVec.size(); _triConsVec = _memPool.allocateAny<int>( triNum ); _triConsVec.assign( triNum, -1 ); _flipVec = _memPool.allocateAny<FlipItem>( _triMax ); _triMsgVec = _memPool.allocateAny<int2>( _triMax ); _actTriVec = 
_memPool.allocateAny<int>( _triMax ); _triMsgVec.assign( _triMax, make_int2( -1, -1 ) ); int outerLoop = 0; int flipLoop = 0; int totFlipNum = 0; int flipNum; while ( markIntersections() ) { if ( _input->isProfiling( ProfDiag ) ) std::cout << "Iter " << ( outerLoop+1 ) << std::endl; // VISUALIZATION if ( Visualizer::instance()->isEnable() ) { pauseTiming( ProfNone ); pauseTiming( ProfDefault ); IntHVec triColorVec; _triConsVec.copyToHost( triColorVec ); for ( int i = 0; i < triColorVec.size(); ++i ) if ( triColorVec[i] != -1 ) triColorVec[i] >>= 4; Visualizer::instance()->addFrame( _pointVec, _constraintVec, _triVec, triColorVec, _infIdx ); startTiming( ProfDefault ); startTiming( ProfNone ); } // Collect active triangles thrust_copyIf_IsNotNegative( _triConsVec, _actTriVec ); int innerLoop = 0; while ( doConsFlipping( flipNum ) ) { totFlipNum += flipNum; // VISUALIZATION if ( Visualizer::instance()->isEnable() ) { pauseTiming( ProfNone ); pauseTiming( ProfDefault ); IntHVec triColorVec; _triConsVec.copyToHost( triColorVec ); for ( int i = 0; i < triColorVec.size(); ++i ) if ( triColorVec[i] != -1 ) triColorVec[i] >>= 4; Visualizer::instance()->addFrame( _pointVec, _constraintVec, _triVec, triColorVec, _infIdx ); startTiming( ProfDefault ); startTiming( ProfNone ); } ++flipLoop; ++innerLoop; if ( innerLoop == 5 ) break; //if ( flipLoop == 1 ) break; } ++outerLoop; // Mark all the possibly modified triangles as Alive + Changed (3). thrust_scatterConstantMap( _actTriVec, _triInfoVec, 3 ); //if ( outerLoop == 5 ) break; } //if ( outerLoop >= 20 ) //{ // for ( int i = 0; i < _actTriVec.size(); ++i ) // std::cout << _actTriVec[i] << " "; // std::cout << std::endl; //} if ( _input->isProfiling( ProfDiag ) ) std::cout << "ConsFlip: Outer loop = " << outerLoop << ", inner loop = " << flipLoop << ", total flip = " << totFlipNum << std::endl; _memPool.release( _triConsVec ); _memPool.release( _triMsgVec ); _memPool.release( _actTriVec ); _memPool.release( _flipVec ); stopTiming( ProfDefault, _output->stats.constraintTime ); } void GpuDel::splitAndFlip() { int insLoop = 0; _doFlipping = !_input->insAll; ////////////////// while ( _availPtNum > 0 ) ////////////////// { //////////////////////// splitTri(); //////////////////////// if ( _doFlipping ) doFlippingLoop( CircleFastOrientFast ); ++insLoop; } ////////////////////////////// if ( !_doFlipping ) doFlippingLoop( CircleFastOrientFast ); markSpecialTris(); doFlippingLoop( CircleExactOrientSoS ); ////////////////////////////// // Insert constraints if needed if ( _constraintVec.size() > 0 ) doInsertConstraints(); doFlippingLoop( CircleFastOrientFast ); markSpecialTris(); doFlippingLoop( CircleExactOrientSoS ); #pragma region Diagnostic if ( _input->isProfiling( ProfDiag ) ) { std::cout << "\nInsert loops: " << insLoop << std::endl; std::cout << "Compact: " << std::endl; _diagLogCompact.printCount(); std::cout << "Collect: " << std::endl; _diagLogCollect.printCount(); } #pragma endregion return; } void GpuDel::markSpecialTris() { startTiming( ProfDetail ); kerMarkSpecialTris<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _triInfoVec ), toKernelPtr( _oppVec ) ); CudaCheckError(); stopTiming( ProfDetail, _diagLog->_t[ 0 ] ); } void GpuDel::expandTri( int newTriNum ) { //*** Expand triangles _triVec.expand( newTriNum ); _oppVec.expand( newTriNum ); _triInfoVec.expand( newTriNum ); } void GpuDel::splitTri() { const int MaxSamplePerTri = 100; startTiming( ProfDefault ); //// // Rank points //// int triNum = _triVec.size(); int noSample = _pointNum; 
if ( noSample / triNum > MaxSamplePerTri ) noSample = triNum * MaxSamplePerTri; IntDVec triCircleVec = _memPool.allocateAny<int>( _triMax ); triCircleVec.assign( triNum, INT_MIN ); IntDVec vertCircleVec = _memPool.allocateAny<int>( _pointNum ); vertCircleVec.resize( noSample ); kerVoteForPoint<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _vertTriVec ), toKernelPtr( _triVec ), toKernelPtr( vertCircleVec ), toKernelPtr( triCircleVec ), noSample ); CudaCheckError(); IntDVec triToVert = _memPool.allocateAny<int>( _triMax ); triToVert.assign( triNum, INT_MAX ); kerPickWinnerPoint<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _vertTriVec ), toKernelPtr( vertCircleVec ), toKernelPtr( triCircleVec ), toKernelPtr( triToVert ), noSample ); CudaCheckError(); _memPool.release( vertCircleVec ); _memPool.release( triCircleVec ); //// // Collect triangles with insertions //// IntDVec splitTriVec = _memPool.allocateAny<int>( _pointNum ); _insNum = thrust_copyIf_TriHasVert( triToVert, splitTriVec ); const int extraTriNum = DIM * _insNum; const int splitTriNum = triNum + extraTriNum; if ( _input->isProfiling( ProfDiag ) ) { std::cout << "Insert: " << _insNum << " Tri from: " << triNum << " to: " << splitTriNum << std::endl; } // If there's just a few points if ( _availPtNum - _insNum < _insNum && _insNum < 0.1 * _pointNum ) { _doFlipping = false; //std::cout << "Stop flipping!" << std::endl; } if ( !_input->noReorder && _doFlipping ) { stopTiming( ProfDefault, _output->stats.splitTime ); shiftTri( triToVert, splitTriVec ); triNum = -1; // Mark that we have shifted the array startTiming( ProfDefault ); } //// // Make map //// IntDVec insTriMap = _memPool.allocateAny<int>( _triMax ); insTriMap.assign( ( triNum < 0 ) ? splitTriNum : triNum, -1 ); thrust_scatterSequenceMap( splitTriVec, insTriMap ); //// // Expand if space needed //// expandTri( splitTriNum ); //// // Update the location of the points //// stopTiming( ProfDefault, _output->stats.splitTime ); startTiming( ProfDefault ); IntDVec exactCheckVec = _memPool.allocateAny<int>( _pointNum ); _counters.renew(); kerSplitPointsFast<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _vertTriVec ), toKernelPtr( triToVert ), toKernelPtr( _triVec ), toKernelPtr( insTriMap ), toKernelPtr( exactCheckVec ), _counters.ptr(), triNum, _insNum ); kerSplitPointsExactSoS<<< PredBlocksPerGrid, PredThreadsPerBlock >>>( toKernelPtr( _vertTriVec ), toKernelPtr( triToVert ), toKernelPtr( _triVec ), toKernelPtr( insTriMap ), toKernelPtr( exactCheckVec ), _counters.ptr(), triNum, _insNum ); CudaCheckError(); _memPool.release( exactCheckVec ); stopTiming( ProfDefault, _output->stats.relocateTime ); startTiming( ProfDefault ); //// // Split old into new triangle and copy them to new array //// kerSplitTri<<< BlocksPerGrid, 32 >>>( toKernelArray( splitTriVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), toKernelPtr( insTriMap ), toKernelPtr( triToVert ), triNum, _insNum ); CudaCheckError(); _memPool.release( triToVert ); _memPool.release( insTriMap ); _memPool.release( splitTriVec ); _availPtNum -= _insNum; stopTiming( ProfDefault, _output->stats.splitTime ); Visualizer::instance()->addFrame( _pointVec, SegmentDVec(), _triVec, _infIdx ); return; } bool GpuDel::doFlipping( CheckDelaunayMode checkMode ) { startTiming( ProfDetail ); ++_diagLog->_flipLoop; const int triNum = _triVec.size(); //// // Compact active triangles //// switch ( _actTriMode ) { case ActTriMarkCompact: thrust_copyIf_IsActiveTri( _triInfoVec, _actTriVec ); 
break; case ActTriCollectCompact: IntDVec temp = _memPool.allocateAny<int>( _triMax, true ); compactIfNegative( _actTriVec, temp ); break; } int orgActNum = _actTriVec.size(); #pragma region Diagnostic if ( _input->isProfiling( ProfDiag ) ) { _numActiveVec.push_back( orgActNum ); if ( orgActNum == 0 || ( checkMode != CircleExactOrientSoS && orgActNum < PredBlocksPerGrid * PredThreadsPerBlock ) ) { _numFlipVec.push_back( 0 ); _timeCheckVec.push_back( 0.0 ); _timeFlipVec.push_back( 0.0 ); _numCircleVec.push_back( 0 ); } } #pragma endregion restartTiming( ProfDetail, _diagLog->_t[ 0 ] ); ///////////////////////////////////////////////////////////////////// //// // Check actNum, switch mode or quit if necessary //// // No more work if ( 0 == orgActNum ) return false; // Little work, leave it for the Exact iterations if ( checkMode != CircleExactOrientSoS && orgActNum < PredBlocksPerGrid * PredThreadsPerBlock ) return false; // See if there's little work enough to switch to collect mode. // Safety check: make sure there's enough space to collect if ( orgActNum < BlocksPerGrid * ThreadsPerBlock && orgActNum * 2 < _actTriVec.capacity() && orgActNum * 2 < triNum ) { _actTriMode = ActTriCollectCompact; _diagLog = &_diagLogCollect; } else { _actTriMode = ActTriMarkCompact; _diagLog = &_diagLogCompact; } //// // Vote for flips //// #pragma region Diagnostic if ( _input->isProfiling( ProfDiag ) ) { __circleCountVec.assign( triNum, 0 ); __rejFlipVec.assign( triNum, 0 ); } #pragma endregion IntDVec triVoteVec = _memPool.allocateAny<int>( _triMax ); triVoteVec.assign( triNum, INT_MAX ); dispatchCheckDelaunay( checkMode, orgActNum, triVoteVec ); double prevTime = _diagLog->_t[ 1 ]; restartTiming( ProfDetail, _diagLog->_t[ 1 ] ); ///////////////////////////////////////////////////////////////////// //// // Mark rejected flips //// IntDVec flipToTri = _memPool.allocateAny<int>( _triMax ); flipToTri.resize( orgActNum ); kerMarkRejectedFlips<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelPtr( _actTriVec ), toKernelPtr( _oppVec ), toKernelPtr( triVoteVec ), toKernelPtr( _triInfoVec ), toKernelPtr( flipToTri ), orgActNum, _input->isProfiling( ProfDiag ) ? toKernelPtr( __rejFlipVec ) : NULL ); CudaCheckError(); _memPool.release( triVoteVec ); restartTiming( ProfDetail, _diagLog->_t[ 2 ] ); ///////////////////////////////////////////////////////////////////// //// // Compact flips //// IntDVec temp = _memPool.allocateAny<int>( _triMax, true ); const int flipNum = compactIfNegative( flipToTri, temp ); if ( _input->isProfiling( ProfDiag ) ) { _numFlipVec.push_back( flipNum ); _timeCheckVec.push_back( _diagLog->_t[ 1 ] - prevTime ); } restartTiming( ProfDetail, _diagLog->_t[ 3 ] ); ///////////////////////////////////////////////////////////////////// //// // Preparation for the actual flipping. Include several steps //// #pragma region Diagnostic if ( _input->isProfiling( ProfDiag ) ) { const int circleNum = thrust_sum( __circleCountVec ); _diagLog->_circleCount += circleNum; const int rejFlipNum = thrust_sum( __rejFlipVec ); _diagLog->_rejFlipCount += rejFlipNum; _diagLog->_totFlipNum += flipNum; std::cout << "Acts: " << orgActNum << " Flips: " << flipNum << " ( " << rejFlipNum << " )" << " circle: " << circleNum << " Exact: " << ( checkMode == CircleExactOrientSoS ? 
_counters[ CounterExact ] : -1 ) << std::endl; _numCircleVec.push_back( circleNum ); startTiming( ProfDetail ); } #pragma endregion if ( 0 == flipNum ) { _numCircleVec.push_back( 0 ); _timeFlipVec.push_back( 0 ); _memPool.release( flipToTri ); return false; } // Expand flip vector int orgFlipNum = _flipVec.size(); int expFlipNum = orgFlipNum + flipNum; if ( expFlipNum > _flipVec.capacity() ) { stopTiming( ProfDetail, _diagLog->_t[ 4 ] ); stopTiming( ProfDefault, _output->stats.flipTime ); relocateAll(); startTiming( ProfDefault ); startTiming( ProfDetail ); orgFlipNum = 0; expFlipNum = flipNum; } _flipVec.grow( expFlipNum ); // _triMsgVec contains two components. // - .x is the encoded new neighbor information // - .y is the flipIdx as in the flipVec (i.e. globIdx) // As such, we do not need to initialize it to -1 to // know which tris are not flipped in the current rount. // We can rely on the flipIdx being > or < than orgFlipIdx. // Note that we have to initialize everything to -1 // when we clear the flipVec and reset the flip indexing. // _triMsgVec.resize( _triVec.size() ); //// // Expand active tri vector //// if ( _actTriMode == ActTriCollectCompact ) _actTriVec.grow( orgActNum + flipNum ); restartTiming( ProfDetail, _diagLog->_t[ 4 ] ); ///////////////////////////////////////////////////////////////////// //// // Flipping //// // 32 ThreadsPerBlock is optimal kerFlip<<< BlocksPerGrid, 32 >>>( toKernelArray( flipToTri ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), toKernelPtr( _triMsgVec ), ( _actTriMode == ActTriCollectCompact ) ? toKernelPtr( _actTriVec ) : NULL, toKernelPtr( _flipVec ), NULL, NULL, orgFlipNum, orgActNum ); CudaCheckError(); _orgFlipNum.push_back( orgFlipNum ); //// // Update oppTri //// kerUpdateOpp<<< BlocksPerGrid, 32 >>>( toKernelPtr( _flipVec ) + orgFlipNum, toKernelPtr( _oppVec ), toKernelPtr( _triMsgVec ), toKernelPtr( flipToTri ), orgFlipNum, flipNum ); CudaCheckError(); _memPool.release( flipToTri ); prevTime = _diagLog->_t[ 5 ]; stopTiming( ProfDetail, _diagLog->_t[ 5 ] ); if ( _input->isProfiling( ProfDiag ) ) _timeFlipVec.push_back( _diagLog->_t[ 5 ] - prevTime ); ///////////////////////////////////////////////////////////////////// Visualizer::instance()->addFrame( _pointVec, SegmentDVec(), _triVec, _infIdx ); return true; } void GpuDel::dispatchCheckDelaunay ( CheckDelaunayMode checkMode, int orgActNum, IntDVec& triVoteVec ) { switch ( checkMode ) { case CircleFastOrientFast: kerCheckDelaunayFast<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelPtr( _actTriVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), toKernelPtr( triVoteVec ), orgActNum, _input->isProfiling( ProfDiag ) ? toKernelPtr( __circleCountVec ) : NULL ); CudaCheckError(); break; case CircleExactOrientSoS: // Reuse this array to save memory Int2DVec &exactCheckVi = _triMsgVec; _counters.renew(); kerCheckDelaunayExact_Fast<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelPtr( _actTriVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( _triInfoVec ), toKernelPtr( triVoteVec ), toKernelPtr( exactCheckVi ), orgActNum, _counters.ptr(), _input->isProfiling( ProfDiag ) ? toKernelPtr( __circleCountVec ) : NULL ); kerCheckDelaunayExact_Exact<<< PredBlocksPerGrid, PredThreadsPerBlock >>>( toKernelPtr( _triVec ), toKernelPtr( _oppVec ), toKernelPtr( triVoteVec ), toKernelPtr( exactCheckVi ), _counters.ptr(), _input->isProfiling( ProfDiag ) ? 
toKernelPtr( __circleCountVec ) : NULL ); CudaCheckError(); break; } } template< typename T > __global__ void kerShift ( KerIntArray shiftVec, T* src, T* dest ) { for ( int idx = getCurThreadIdx(); idx < shiftVec._num; idx += getThreadNum() ) { const int shift = shiftVec._arr[ idx ]; dest[ idx + shift ] = src[ idx ]; } } template< typename T > void GpuDel::shiftExpandVec( IntDVec &shiftVec, DevVector< T > &dataVec, int size ) { DevVector< T > tempVec = _memPool.allocateAny<T>( size ); tempVec.resize( size ); kerShift<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( shiftVec ), toKernelPtr( dataVec ), toKernelPtr( tempVec ) ); CudaCheckError(); dataVec.copyFrom( tempVec ); _memPool.release( tempVec ); } void GpuDel::shiftOppVec( IntDVec &shiftVec, TriOppDVec &dataVec, int size ) { TriOppDVec tempVec = _memPool.allocateAny< TriOpp >( size ); tempVec.resize( size ); kerShiftOpp<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( shiftVec ), toKernelPtr( dataVec ), toKernelPtr( tempVec ), size ); CudaCheckError(); dataVec.copyFrom( tempVec ); _memPool.release( tempVec ); } void GpuDel::shiftTri( IntDVec &triToVert, IntDVec &splitTriVec ) { startTiming( ProfDefault ); const int triNum = _triVec.size() + 2 * splitTriVec.size(); IntDVec shiftVec = _memPool.allocateAny<int>( _triMax ); thrust_scan_TriHasVert( triToVert, shiftVec ); shiftExpandVec( shiftVec, _triVec, triNum ); shiftExpandVec( shiftVec, _triInfoVec, triNum ); shiftExpandVec( shiftVec, triToVert, triNum ); shiftOppVec( shiftVec, _oppVec, triNum ); kerShiftTriIdx<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _vertTriVec ), toKernelPtr( shiftVec ) ); CudaCheckError(); kerShiftTriIdx<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( splitTriVec ), toKernelPtr( shiftVec ) ); CudaCheckError(); _memPool.release( shiftVec ); stopTiming( ProfDefault, _output->stats.sortTime ); } void GpuDel::relocateAll() { if ( _flipVec.size() == 0 ) return ; startTiming( ProfDefault ); if ( _availPtNum > 0 ) { const int triNum = _triVec.size(); IntDVec triToFlip = _memPool.allocateAny<int>( _triMax ); triToFlip.assign( triNum, -1 ); // Rebuild the pointers from back to forth int nextFlipNum = _flipVec.size(); for ( int i = _orgFlipNum.size() - 1; i >= 0; --i ) { int prevFlipNum = _orgFlipNum[ i ]; int flipNum = nextFlipNum - prevFlipNum; kerUpdateFlipTrace<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelPtr( _flipVec ), toKernelPtr( triToFlip ), prevFlipNum, flipNum ); nextFlipNum = prevFlipNum; } CudaCheckError(); // Relocate points IntDVec exactCheckVec = _memPool.allocateAny<int>( _pointNum ); _counters.renew(); kerRelocatePointsFast<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _vertTriVec ), toKernelPtr( triToFlip ), toKernelPtr( _flipVec ), toKernelPtr( exactCheckVec ), _counters.ptr() ); kerRelocatePointsExact<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelPtr( _vertTriVec ), toKernelPtr( triToFlip ), toKernelPtr( _flipVec ), toKernelPtr( exactCheckVec ), _counters.ptr() ); CudaCheckError(); _memPool.release( exactCheckVec ); _memPool.release( triToFlip ); } // Just clean up the flips _flipVec.resize( 0 ); _orgFlipNum.clear(); // Reset the triMsgVec _triMsgVec.assign( _triMax, make_int2( -1, -1 ) ); stopTiming( ProfDefault, _output->stats.relocateTime ); } void GpuDel::compactTris() { const int triNum = _triVec.size(); IntDVec prefixVec = _memPool.allocateAny<int>( _triMax ); prefixVec.resize( triNum ); thrust_scan_TriAliveStencil( _triInfoVec, prefixVec ); int newTriNum = prefixVec[ triNum - 1 ]; int freeNum = triNum - 
newTriNum; IntDVec freeVec = _memPool.allocateAny<int>( _triMax ); freeVec.resize( freeNum ); kerCollectFreeSlots<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelPtr( _triInfoVec ), toKernelPtr( prefixVec ), toKernelPtr( freeVec ), newTriNum ); CudaCheckError(); // Make map kerMakeCompactMap<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _triInfoVec ), toKernelPtr( prefixVec ), toKernelPtr( freeVec ), newTriNum ); CudaCheckError(); // Reorder the tets kerCompactTris<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _triInfoVec ), toKernelPtr( prefixVec ), toKernelPtr( _triVec ), toKernelPtr( _oppVec ), newTriNum ); CudaCheckError(); _triInfoVec.resize( newTriNum ); _triVec.resize( newTriNum ); _oppVec.resize( newTriNum ); _memPool.release( freeVec ); _memPool.release( prefixVec ); } void GpuDel::outputToHost() { startTiming( ProfDefault ); kerMarkInfinityTri<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _triVec ), toKernelPtr( _triInfoVec ), toKernelPtr( _oppVec ), _infIdx ); CudaCheckError(); compactTris(); if ( !_input->noSort ) { // Change the indices back to the original order kerUpdateVertIdx<<< BlocksPerGrid, ThreadsPerBlock >>>( toKernelArray( _triVec ), toKernelPtr( _triInfoVec ), toKernelPtr( _orgPointIdx ) ); CudaCheckError(); } //// // Copy to host _triVec.copyToHost( _output->triVec ); _oppVec.copyToHost( _output->triOppVec ); // Output Infty point _output->ptInfty = _ptInfty; stopTiming( ProfDefault, _output->stats.outTime ); //// std::cout << "# Triangles: " << _triVec.size() << std::endl; return; }
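The kerShift / shiftExpandVec pair in the file above scatters each triangle record to index idx + shift[idx], where shiftVec holds, for each triangle, how many new slots open up before it (a scan over the triangles picked for splitting). The standalone CUDA sketch below shows only that scatter-shift pattern; the host driver, the example shift values, and the -1 fill are hypothetical and are not part of the file above.

// Minimal sketch of the scatter-shift pattern used by kerShift / shiftExpandVec above.
// The host-side driver is hypothetical: shift[i] holds how many extra slots are inserted
// before element i, and each element is scattered to dest[i + shift[i]], leaving gaps.
#include <cstdio>
#include <cuda_runtime.h>

__global__ void shiftScatter(const int* shift, const int* src, int* dest, int num)
{
    // Grid-stride loop, same structure as kerShift in the file above.
    for (int idx = blockIdx.x * blockDim.x + threadIdx.x; idx < num;
         idx += gridDim.x * blockDim.x)
    {
        dest[idx + shift[idx]] = src[idx];
    }
}

int main()
{
    const int num = 8;
    // Two extra slots open after elements 2 and 5 (an exclusive scan of the inserts).
    int hShift[num] = { 0, 0, 0, 2, 2, 2, 4, 4 };
    int hSrc[num]   = { 10, 11, 12, 13, 14, 15, 16, 17 };
    const int outNum = num + 4;

    int *dShift, *dSrc, *dDest;
    cudaMalloc(&dShift, num * sizeof(int));
    cudaMalloc(&dSrc,   num * sizeof(int));
    cudaMalloc(&dDest,  outNum * sizeof(int));
    cudaMemcpy(dShift, hShift, num * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemcpy(dSrc,   hSrc,   num * sizeof(int), cudaMemcpyHostToDevice);
    cudaMemset(dDest, 0xFF, outNum * sizeof(int));   // -1 marks the freshly opened slots

    shiftScatter<<<1, 128>>>(dShift, dSrc, dDest, num);

    int hDest[outNum];
    cudaMemcpy(hDest, dDest, outNum * sizeof(int), cudaMemcpyDeviceToHost);
    for (int i = 0; i < outNum; ++i)
        printf("%d ", hDest[i]);                     // 10 11 12 -1 -1 13 14 15 -1 -1 16 17
    printf("\n");

    cudaFree(dShift); cudaFree(dSrc); cudaFree(dDest);
    return 0;
}

The -1 gaps in the output are the slots that the split step later fills with the new triangles created per insertion.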
gen_hip.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * This file belongs to the Galois project, a C++ library for exploiting parallelism. * The code is being released under the terms of the 3-Clause BSD License (a * copy is located in LICENSE.txt at the top-level directory). * * Copyright (C) 2018, The University of Texas at Austin. All rights reserved. * UNIVERSITY EXPRESSLY DISCLAIMS ANY AND ALL WARRANTIES CONCERNING THIS * SOFTWARE AND DOCUMENTATION, INCLUDING ANY WARRANTIES OF MERCHANTABILITY, * FITNESS FOR ANY PARTICULAR PURPOSE, NON-INFRINGEMENT AND WARRANTIES OF * PERFORMANCE, AND ANY WARRANTY THAT MIGHT OTHERWISE ARISE FROM COURSE OF * DEALING OR USAGE OF TRADE. NO WARRANTY IS EITHER EXPRESS OR IMPLIED WITH * RESPECT TO THE USE OF THE SOFTWARE OR DOCUMENTATION. Under no circumstances * shall University be liable for incidental, special, indirect, direct or * consequential damages or loss of profits, interruption of business, or * related expenses which may arise from use of Software or Documentation, * including but not limited to those resulting from defects in Software and/or * Documentation, or loss or inaccuracy of data of any kind. */ /* -*- mode: c++ -*- */ #include "gg.h" #include "ggcuda.h" void kernel_sizing(CSRGraph &, dim3 &, dim3 &); #define TB_SIZE 256 const char *GGC_OPTIONS = "coop_conv=False $ outline_iterate_gb=False $ backoff_blocking_factor=4 $ parcomb=True $ np_schedulers=set(['fg', 'tb', 'wp']) $ cc_disable=set([]) $ hacks=set([]) $ np_factor=8 $ instrument=set([]) $ unroll=[] $ instrument_mode=None $ read_props=None $ outline_iterate=True $ ignore_nested_errors=False $ np=True $ write_props=None $ quiet_cgen=True $ retry_backoff=True $ cuda.graph_type=basic $ cuda.use_worklist_slots=True $ cuda.worklist_type=basic"; #include "kernels/reduce.cuh" #include "gen_hip.cuh" static const int __tb_InitializeGraph2 = TB_SIZE; static const int __tb_KCoreStep1 = TB_SIZE; __global__ void InitializeGraph2(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_current_degree, DynamicBitset& bitset_current_degree) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = __tb_InitializeGraph2; index_type src_end; index_type src_rup; // FP: "1 -> 2; const int _NP_CROSSOVER_WP = 32; const int _NP_CROSSOVER_TB = __kernel_tb_size; // FP: "2 -> 3; const int BLKSIZE = __kernel_tb_size; const int ITSIZE = BLKSIZE * 8; // FP: "3 -> 4; typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan; typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy; // FP: "4 -> 5; __shared__ npsTy nps ; // FP: "5 -> 6; src_end = __end; src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x))); for (index_type src = __begin + tid; src < src_rup; src += nthreads) { multiple_sum<2, index_type> _np_mps; multiple_sum<2, index_type> _np_mps_total; // FP: "6 -> 7; bool pop = src < __end; // FP: "7 -> 8; if (pop) { } // FP: "9 -> 10; // FP: "12 -> 13; struct NPInspector1 _np = {0,0,0,0,0,0}; // FP: "13 -> 14; __shared__ struct { ; } _np_closure [TB_SIZE]; // FP: "14 -> 15; // FP: "15 -> 16; if (pop) { _np.size = (graph).getOutDegree(src); _np.start = (graph).getFirstEdge(src); } // FP: "18 -> 19; // FP: "19 -> 20; _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0; _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? 
_np.size : 0; // FP: "20 -> 21; BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total); // FP: "21 -> 22; if (threadIdx.x == 0) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "24 -> 25; __syncthreads(); // FP: "25 -> 26; while (true) { // FP: "26 -> 27; if (_np.size >= _NP_CROSSOVER_TB) { nps.tb.owner = threadIdx.x; } // FP: "29 -> 30; __syncthreads(); // FP: "30 -> 31; if (nps.tb.owner == MAX_TB_SIZE + 1) { // FP: "31 -> 32; __syncthreads(); // FP: "32 -> 33; break; } // FP: "34 -> 35; if (nps.tb.owner == threadIdx.x) { nps.tb.start = _np.start; nps.tb.size = _np.size; nps.tb.src = threadIdx.x; _np.start = 0; _np.size = 0; } // FP: "37 -> 38; __syncthreads(); // FP: "38 -> 39; int ns = nps.tb.start; int ne = nps.tb.size; // FP: "39 -> 40; if (nps.tb.src == threadIdx.x) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "42 -> 43; assert(nps.tb.src < __kernel_tb_size); // FP: "43 -> 44; for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE) { index_type current_edge; current_edge = ns +_np_j; { index_type dest_node; dest_node = graph.getAbsDestination(current_edge); atomicTestAdd(&p_current_degree[dest_node], (uint32_t)1); bitset_current_degree.set(dest_node); } } // FP: "51 -> 52; __syncthreads(); } // FP: "53 -> 54; // FP: "54 -> 55; { const int warpid = threadIdx.x / 32; // FP: "55 -> 56; const int _np_laneid = cub::LaneId(); // FP: "56 -> 57; while (__any(_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)) { if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB) { nps.warp.owner[warpid] = _np_laneid; } if (nps.warp.owner[warpid] == _np_laneid) { nps.warp.start[warpid] = _np.start; nps.warp.size[warpid] = _np.size; nps.warp.src[warpid] = threadIdx.x; _np.start = 0; _np.size = 0; } index_type _np_w_start = nps.warp.start[warpid]; index_type _np_w_size = nps.warp.size[warpid]; assert(nps.warp.src[warpid] < __kernel_tb_size); for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32) { index_type current_edge; current_edge = _np_w_start +_np_ii; { index_type dest_node; dest_node = graph.getAbsDestination(current_edge); atomicTestAdd(&p_current_degree[dest_node], (uint32_t)1); bitset_current_degree.set(dest_node); } } } // FP: "74 -> 75; __syncthreads(); // FP: "75 -> 76; } // FP: "76 -> 77; __syncthreads(); // FP: "77 -> 78; _np.total = _np_mps_total.el[1]; _np.offset = _np_mps.el[1]; // FP: "78 -> 79; while (_np.work()) { // FP: "79 -> 80; int _np_i =0; // FP: "80 -> 81; _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x); // FP: "81 -> 82; __syncthreads(); // FP: "82 -> 83; // FP: "83 -> 84; for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE) { index_type current_edge; assert(nps.fg.src[_np_i] < __kernel_tb_size); current_edge= nps.fg.itvalue[_np_i]; { index_type dest_node; dest_node = graph.getAbsDestination(current_edge); atomicTestAdd(&p_current_degree[dest_node], (uint32_t)1); bitset_current_degree.set(dest_node); } } // FP: "92 -> 93; _np.execute_round_done(ITSIZE); // FP: "93 -> 94; __syncthreads(); } // FP: "95 -> 96; assert(threadIdx.x < __kernel_tb_size); } // FP: "97 -> 98; } __global__ void InitializeGraph1(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_current_degree, uint8_t * p_flag, uint32_t * p_trim) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { p_flag[src] = 
true; p_trim[src] = 0; p_current_degree[src] = 0; } } // FP: "9 -> 10; } __global__ void KCoreStep2(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t * p_current_degree, uint8_t * p_flag, uint32_t * p_trim) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; index_type src_end; // FP: "1 -> 2; src_end = __end; for (index_type src = __begin + tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { if (p_flag[src]) { if (p_trim[src] > 0) { p_current_degree[src] = p_current_degree[src] - p_trim[src]; } } p_trim[src] = 0; } } // FP: "12 -> 13; } __global__ void KCoreStep1(CSRGraph graph, unsigned int __begin, unsigned int __end, uint32_t local_k_core_num, uint32_t * p_current_degree, uint8_t * p_flag, uint32_t * p_trim, HGAccumulator<unsigned int> DGAccumulator_accum) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = __tb_KCoreStep1; __shared__ hipcub::BlockReduce<unsigned int, TB_SIZE>::TempStorage DGAccumulator_accum_ts; index_type src_end; index_type src_rup; // FP: "1 -> 2; const int _NP_CROSSOVER_WP = 32; const int _NP_CROSSOVER_TB = __kernel_tb_size; // FP: "2 -> 3; const int BLKSIZE = __kernel_tb_size; const int ITSIZE = BLKSIZE * 8; // FP: "3 -> 4; typedef hipcub::BlockScan<multiple_sum<2, index_type>, BLKSIZE> BlockScan; typedef union np_shared<BlockScan::TempStorage, index_type, struct tb_np, struct warp_np<__kernel_tb_size/32>, struct fg_np<ITSIZE> > npsTy; // FP: "4 -> 5; __shared__ npsTy nps ; // FP: "5 -> 6; // FP: "6 -> 7; DGAccumulator_accum.thread_entry(); // FP: "7 -> 8; src_end = __end; src_rup = ((__begin) + roundup(((__end) - (__begin)), (blockDim.x))); for (index_type src = __begin + tid; src < src_rup; src += nthreads) { multiple_sum<2, index_type> _np_mps; multiple_sum<2, index_type> _np_mps_total; // FP: "8 -> 9; bool pop = src < __end; // FP: "9 -> 10; if (pop) { if (p_flag[src]) { if (p_current_degree[src] < local_k_core_num) { p_flag[src] = false; DGAccumulator_accum.reduce( 1); } else { pop = false; } } else { pop = false; } } // FP: "17 -> 18; // FP: "20 -> 21; struct NPInspector1 _np = {0,0,0,0,0,0}; // FP: "21 -> 22; __shared__ struct { ; } _np_closure [TB_SIZE]; // FP: "22 -> 23; // FP: "23 -> 24; if (pop) { _np.size = (graph).getOutDegree(src); _np.start = (graph).getFirstEdge(src); } // FP: "26 -> 27; // FP: "27 -> 28; _np_mps.el[0] = _np.size >= _NP_CROSSOVER_WP ? _np.size : 0; _np_mps.el[1] = _np.size < _NP_CROSSOVER_WP ? 
_np.size : 0; // FP: "28 -> 29; BlockScan(nps.temp_storage).ExclusiveSum(_np_mps, _np_mps, _np_mps_total); // FP: "29 -> 30; if (threadIdx.x == 0) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "32 -> 33; __syncthreads(); // FP: "33 -> 34; while (true) { // FP: "34 -> 35; if (_np.size >= _NP_CROSSOVER_TB) { nps.tb.owner = threadIdx.x; } // FP: "37 -> 38; __syncthreads(); // FP: "38 -> 39; if (nps.tb.owner == MAX_TB_SIZE + 1) { // FP: "39 -> 40; __syncthreads(); // FP: "40 -> 41; break; } // FP: "42 -> 43; if (nps.tb.owner == threadIdx.x) { nps.tb.start = _np.start; nps.tb.size = _np.size; nps.tb.src = threadIdx.x; _np.start = 0; _np.size = 0; } // FP: "45 -> 46; __syncthreads(); // FP: "46 -> 47; int ns = nps.tb.start; int ne = nps.tb.size; // FP: "47 -> 48; if (nps.tb.src == threadIdx.x) { nps.tb.owner = MAX_TB_SIZE + 1; } // FP: "50 -> 51; assert(nps.tb.src < __kernel_tb_size); // FP: "51 -> 52; for (int _np_j = threadIdx.x; _np_j < ne; _np_j += BLKSIZE) { index_type current_edge; current_edge = ns +_np_j; { index_type dst; dst = graph.getAbsDestination(current_edge); atomicTestAdd(&p_trim[dst], (uint32_t)1); } } // FP: "59 -> 60; __syncthreads(); } // FP: "61 -> 62; // FP: "62 -> 63; { const int warpid = threadIdx.x / 32; // FP: "63 -> 64; const int _np_laneid = cub::LaneId(); // FP: "64 -> 65; while (__any(_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB)) { if (_np.size >= _NP_CROSSOVER_WP && _np.size < _NP_CROSSOVER_TB) { nps.warp.owner[warpid] = _np_laneid; } if (nps.warp.owner[warpid] == _np_laneid) { nps.warp.start[warpid] = _np.start; nps.warp.size[warpid] = _np.size; nps.warp.src[warpid] = threadIdx.x; _np.start = 0; _np.size = 0; } index_type _np_w_start = nps.warp.start[warpid]; index_type _np_w_size = nps.warp.size[warpid]; assert(nps.warp.src[warpid] < __kernel_tb_size); for (int _np_ii = _np_laneid; _np_ii < _np_w_size; _np_ii += 32) { index_type current_edge; current_edge = _np_w_start +_np_ii; { index_type dst; dst = graph.getAbsDestination(current_edge); atomicTestAdd(&p_trim[dst], (uint32_t)1); } } } // FP: "82 -> 83; __syncthreads(); // FP: "83 -> 84; } // FP: "84 -> 85; __syncthreads(); // FP: "85 -> 86; _np.total = _np_mps_total.el[1]; _np.offset = _np_mps.el[1]; // FP: "86 -> 87; while (_np.work()) { // FP: "87 -> 88; int _np_i =0; // FP: "88 -> 89; _np.inspect2(nps.fg.itvalue, nps.fg.src, ITSIZE, threadIdx.x); // FP: "89 -> 90; __syncthreads(); // FP: "90 -> 91; // FP: "91 -> 92; for (_np_i = threadIdx.x; _np_i < ITSIZE && _np.valid(_np_i); _np_i += BLKSIZE) { index_type current_edge; assert(nps.fg.src[_np_i] < __kernel_tb_size); current_edge= nps.fg.itvalue[_np_i]; { index_type dst; dst = graph.getAbsDestination(current_edge); atomicTestAdd(&p_trim[dst], (uint32_t)1); } } // FP: "100 -> 101; _np.execute_round_done(ITSIZE); // FP: "101 -> 102; __syncthreads(); } // FP: "103 -> 104; assert(threadIdx.x < __kernel_tb_size); } // FP: "107 -> 108; DGAccumulator_accum.thread_exit<hipcub::BlockReduce<unsigned int, TB_SIZE> >(DGAccumulator_accum_ts); // FP: "108 -> 109; } __global__ void KCoreSanityCheck(CSRGraph graph, unsigned int __begin, unsigned int __end, uint8_t * p_flag, HGAccumulator<uint64_t> DGAccumulator_accum) { unsigned tid = TID_1D; unsigned nthreads = TOTAL_THREADS_1D; const unsigned __kernel_tb_size = TB_SIZE; __shared__ hipcub::BlockReduce<uint64_t, TB_SIZE>::TempStorage DGAccumulator_accum_ts; index_type src_end; // FP: "1 -> 2; // FP: "2 -> 3; DGAccumulator_accum.thread_entry(); // FP: "3 -> 4; src_end = __end; for (index_type src = __begin + 
tid; src < src_end; src += nthreads) { bool pop = src < __end; if (pop) { if (p_flag[src]) { DGAccumulator_accum.reduce( 1); } } } // FP: "11 -> 12; DGAccumulator_accum.thread_exit<hipcub::BlockReduce<uint64_t, TB_SIZE> >(DGAccumulator_accum_ts); // FP: "12 -> 13; } void InitializeGraph2_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; hipLaunchKernelGGL(( InitializeGraph2) , dim3(blocks), dim3(__tb_InitializeGraph2), 0, 0, ctx->gg, __begin, __end, ctx->current_degree.data.gpu_wr_ptr(), *(ctx->current_degree.is_updated.gpu_rd_ptr())); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void InitializeGraph2_allNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph2_cuda(0, ctx->gg.nnodes, ctx); // FP: "2 -> 3; } void InitializeGraph2_masterNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph2_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx); // FP: "2 -> 3; } void InitializeGraph2_nodesWithEdges_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph2_cuda(0, ctx->numNodesWithEdges, ctx); // FP: "2 -> 3; } void InitializeGraph1_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; hipLaunchKernelGGL(( InitializeGraph1) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, ctx->current_degree.data.gpu_wr_ptr(), ctx->flag.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void InitializeGraph1_allNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph1_cuda(0, ctx->gg.nnodes, ctx); // FP: "2 -> 3; } void InitializeGraph1_masterNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph1_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx); // FP: "2 -> 3; } void InitializeGraph1_nodesWithEdges_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; InitializeGraph1_cuda(0, ctx->numNodesWithEdges, ctx); // FP: "2 -> 3; } void KCoreStep2_cuda(unsigned int __begin, unsigned int __end, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; hipLaunchKernelGGL(( KCoreStep2) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, ctx->current_degree.data.gpu_wr_ptr(), ctx->flag.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr()); // FP: "5 -> 6; check_cuda_kernel; // FP: "6 -> 7; } void KCoreStep2_allNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; KCoreStep2_cuda(0, ctx->gg.nnodes, ctx); // FP: "2 -> 3; } void KCoreStep2_masterNodes_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; KCoreStep2_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, ctx); // FP: "2 -> 3; } void KCoreStep2_nodesWithEdges_cuda(struct CUDA_Context* ctx) { // FP: "1 -> 2; KCoreStep2_cuda(0, ctx->numNodesWithEdges, ctx); // FP: "2 -> 3; } void KCoreStep1_cuda(unsigned int __begin, unsigned int __end, unsigned int & DGAccumulator_accum, uint32_t local_k_core_num, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; HGAccumulator<unsigned int> _DGAccumulator_accum; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; Shared<unsigned int> DGAccumulator_accumval = Shared<unsigned int>(1); // FP: "5 -> 6; // FP: "6 -> 7; 
*(DGAccumulator_accumval.cpu_wr_ptr()) = 0; // FP: "7 -> 8; _DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr(); // FP: "8 -> 9; hipLaunchKernelGGL(( KCoreStep1) , dim3(blocks), dim3(__tb_KCoreStep1), 0, 0, ctx->gg, __begin, __end, local_k_core_num, ctx->current_degree.data.gpu_wr_ptr(), ctx->flag.data.gpu_wr_ptr(), ctx->trim.data.gpu_wr_ptr(), _DGAccumulator_accum); // FP: "9 -> 10; check_cuda_kernel; // FP: "10 -> 11; DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr()); // FP: "11 -> 12; } void KCoreStep1_allNodes_cuda(unsigned int & DGAccumulator_accum, uint32_t local_k_core_num, struct CUDA_Context* ctx) { // FP: "1 -> 2; KCoreStep1_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, local_k_core_num, ctx); // FP: "2 -> 3; } void KCoreStep1_masterNodes_cuda(unsigned int & DGAccumulator_accum, uint32_t local_k_core_num, struct CUDA_Context* ctx) { // FP: "1 -> 2; KCoreStep1_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, local_k_core_num, ctx); // FP: "2 -> 3; } void KCoreStep1_nodesWithEdges_cuda(unsigned int & DGAccumulator_accum, uint32_t local_k_core_num, struct CUDA_Context* ctx) { // FP: "1 -> 2; KCoreStep1_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, local_k_core_num, ctx); // FP: "2 -> 3; } void KCoreSanityCheck_cuda(unsigned int __begin, unsigned int __end, uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx) { dim3 blocks; dim3 threads; HGAccumulator<uint64_t> _DGAccumulator_accum; // FP: "1 -> 2; // FP: "2 -> 3; // FP: "3 -> 4; kernel_sizing(blocks, threads); // FP: "4 -> 5; Shared<uint64_t> DGAccumulator_accumval = Shared<uint64_t>(1); // FP: "5 -> 6; // FP: "6 -> 7; *(DGAccumulator_accumval.cpu_wr_ptr()) = 0; // FP: "7 -> 8; _DGAccumulator_accum.rv = DGAccumulator_accumval.gpu_wr_ptr(); // FP: "8 -> 9; hipLaunchKernelGGL(( KCoreSanityCheck) , dim3(blocks), dim3(threads), 0, 0, ctx->gg, __begin, __end, ctx->flag.data.gpu_wr_ptr(), _DGAccumulator_accum); // FP: "9 -> 10; check_cuda_kernel; // FP: "10 -> 11; DGAccumulator_accum = *(DGAccumulator_accumval.cpu_rd_ptr()); // FP: "11 -> 12; } void KCoreSanityCheck_allNodes_cuda(uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; KCoreSanityCheck_cuda(0, ctx->gg.nnodes, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void KCoreSanityCheck_masterNodes_cuda(uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; KCoreSanityCheck_cuda(ctx->beginMaster, ctx->beginMaster + ctx->numOwned, DGAccumulator_accum, ctx); // FP: "2 -> 3; } void KCoreSanityCheck_nodesWithEdges_cuda(uint64_t & DGAccumulator_accum, struct CUDA_Context* ctx) { // FP: "1 -> 2; KCoreSanityCheck_cuda(0, ctx->numNodesWithEdges, DGAccumulator_accum, ctx); // FP: "2 -> 3; }
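KCoreStep1 and KCoreStep2 above implement one round of degree peeling: Step1 clears the flag of every live node whose current_degree falls below k and atomically bumps a trim counter on each neighbour, and Step2 subtracts the accumulated trim from current_degree. The sketch below collapses both steps into a single kernel over a plain CSR graph to show the algorithm in isolation; the CSR arrays, the tiny test graph, and the single-kernel formulation are assumptions for illustration, not Galois' generated code.

// Hedged sketch of the degree-peeling loop behind KCoreStep1 / KCoreStep2 above,
// on a hypothetical standalone CSR graph (not Galois' CUDA_Context).
#include <cstdio>
#include <cuda_runtime.h>

__global__ void kcoreRound(const int* rowStart, const int* colIdx,
                           int* degree, unsigned char* alive,
                           int k, int numNodes, int* changed)
{
    for (int v = blockIdx.x * blockDim.x + threadIdx.x; v < numNodes;
         v += gridDim.x * blockDim.x)
    {
        if (alive[v] && degree[v] < k)
        {
            alive[v] = 0;                           // peel this node
            *changed = 1;
            for (int e = rowStart[v]; e < rowStart[v + 1]; ++e)
                atomicSub(&degree[colIdx[e]], 1);   // plays the role of the trim counter
        }
    }
}

int main()
{
    // Tiny undirected graph: triangle 0-1-2 plus a pendant node 3 attached to 0.
    const int numNodes = 4, numEdges = 8;
    int hRow[numNodes + 1] = { 0, 3, 5, 7, 8 };
    int hCol[numEdges]     = { 1, 2, 3, 0, 2, 0, 1, 0 };
    int hDeg[numNodes]     = { 3, 2, 2, 1 };
    unsigned char hAlive[numNodes] = { 1, 1, 1, 1 };

    int *dRow, *dCol, *dDeg, *dChanged;
    unsigned char* dAlive;
    cudaMalloc(&dRow, sizeof(hRow));  cudaMalloc(&dCol, sizeof(hCol));
    cudaMalloc(&dDeg, sizeof(hDeg));  cudaMalloc(&dAlive, sizeof(hAlive));
    cudaMalloc(&dChanged, sizeof(int));
    cudaMemcpy(dRow, hRow, sizeof(hRow), cudaMemcpyHostToDevice);
    cudaMemcpy(dCol, hCol, sizeof(hCol), cudaMemcpyHostToDevice);
    cudaMemcpy(dDeg, hDeg, sizeof(hDeg), cudaMemcpyHostToDevice);
    cudaMemcpy(dAlive, hAlive, sizeof(hAlive), cudaMemcpyHostToDevice);

    const int k = 2;
    int hChanged = 1;
    while (hChanged)                // iterate rounds until no node is peeled
    {
        hChanged = 0;
        cudaMemcpy(dChanged, &hChanged, sizeof(int), cudaMemcpyHostToDevice);
        kcoreRound<<<1, 128>>>(dRow, dCol, dDeg, dAlive, k, numNodes, dChanged);
        cudaMemcpy(&hChanged, dChanged, sizeof(int), cudaMemcpyDeviceToHost);
    }

    cudaMemcpy(hAlive, dAlive, sizeof(hAlive), cudaMemcpyDeviceToHost);
    for (int v = 0; v < numNodes; ++v)
        printf("node %d in %d-core: %d\n", v, k, hAlive[v]);   // expect 1 1 1 0

    cudaFree(dRow); cudaFree(dCol); cudaFree(dDeg); cudaFree(dAlive); cudaFree(dChanged);
    return 0;
}

The generated code keeps mark (Step1) and trim (Step2) in separate kernels so one round's degree updates never feed back into the same round's test; the collapsed form above still converges because k-core peeling is order-independent.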
gen_hip.cuh
// !!! This is a file automatically generated by hipify!!! #pragma once #include <hip/hip_runtime.h> #include <stdio.h> #include <sys/types.h> #include <unistd.h> #include "gen_cuda.h" #include "galois/runtime/cuda/DeviceSync.h" struct CUDA_Context : public CUDA_Context_Common { struct CUDA_Context_Field<float* /*ARRAY 4*/> some_value; }; struct CUDA_Context* get_CUDA_context(int id) { struct CUDA_Context* ctx; ctx = (struct CUDA_Context* ) calloc(1, sizeof(struct CUDA_Context)); ctx->id = id; return ctx; } bool init_CUDA_context(struct CUDA_Context* ctx, int device) { return init_CUDA_context_common(ctx, device); } void load_graph_CUDA(struct CUDA_Context* ctx, MarshalGraph &g, unsigned num_hosts) { size_t mem_usage = mem_usage_CUDA_common(g, num_hosts); mem_usage += mem_usage_CUDA_field(&ctx->some_value, g, num_hosts); printf("[%d] Host memory for communication context: %3u MB\n", ctx->id, mem_usage/1048756); load_graph_CUDA_common(ctx, g, num_hosts); load_graph_CUDA_array_field(ctx, &ctx->some_value, num_hosts, 4); reset_CUDA_context(ctx); } void reset_CUDA_context(struct CUDA_Context* ctx) { ctx->some_value.data.zero_gpu(); } void get_bitset_some_value_cuda(struct CUDA_Context* ctx, uint64_t* bitset_compute) { ctx->some_value.is_updated.cpu_rd_ptr()->copy_to_cpu(bitset_compute); } void bitset_some_value_reset_cuda(struct CUDA_Context* ctx) { ctx->some_value.is_updated.cpu_rd_ptr()->reset(); } void bitset_some_value_reset_cuda(struct CUDA_Context* ctx, size_t begin, size_t end) { reset_bitset_field(&ctx->some_value, begin, end); } float* /*ARRAY 4*/ get_node_some_value_cuda(struct CUDA_Context* ctx, unsigned LID) { float* /*ARRAY 4*/ *some_value = ctx->some_value.data.cpu_rd_ptr(); return some_value[LID]; } void set_node_some_value_cuda(struct CUDA_Context* ctx, unsigned LID, float* /*ARRAY 4*/ v) { float* /*ARRAY 4*/ *some_value = ctx->some_value.data.cpu_wr_ptr(); int i; for(i = 0; i < 4; ++i) some_value[LID][i] = v[i]; } void add_node_some_value_cuda(struct CUDA_Context* ctx, unsigned LID, float* /*ARRAY 4*/ v) { float* /*ARRAY 4*/ *some_value = ctx->some_value.data.cpu_wr_ptr(); int i; for(i = 0; i < 4; ++i) some_value[LID][i] += v[i]; } bool min_node_some_value_cuda(struct CUDA_Context* ctx, unsigned LID, float* /*ARRAY 4*/ v) { float* /*ARRAY 4*/ *some_value = ctx->some_value.data.cpu_wr_ptr(); if (some_value[LID] > v){ some_value[LID] = v; return true; } return false; } void batch_get_node_some_value_cuda(struct CUDA_Context* ctx, unsigned from_id, uint8_t* v) { batch_get_shared_field<float* /*ARRAY 4*/, sharedMaster, false>(ctx, &ctx->some_value, from_id, v); } void batch_get_node_some_value_cuda(struct CUDA_Context* ctx, unsigned from_id, uint8_t* v, size_t* v_size, DataCommMode* data_mode) { batch_get_shared_field<float* /*ARRAY 4*/, sharedMaster, false>(ctx, &ctx->some_value, from_id, v, v_size, data_mode); } void batch_get_mirror_node_some_value_cuda(struct CUDA_Context* ctx, unsigned from_id, uint8_t* v) { batch_get_shared_field<float* /*ARRAY 4*/, sharedMirror, false>(ctx, &ctx->some_value, from_id, v); } void batch_get_mirror_node_some_value_cuda(struct CUDA_Context* ctx, unsigned from_id, uint8_t* v, size_t* v_size, DataCommMode* data_mode) { batch_get_shared_field<float* /*ARRAY 4*/, sharedMirror, false>(ctx, &ctx->some_value, from_id, v, v_size, data_mode); } void batch_get_reset_node_some_value_cuda(struct CUDA_Context* ctx, unsigned from_id, uint8_t* v, float* /*ARRAY 4*/ i) { batch_get_shared_field<float* /*ARRAY 4*/, sharedMirror, true>(ctx, &ctx->some_value, 
from_id, v, i); } void batch_get_reset_node_some_value_cuda(struct CUDA_Context* ctx, unsigned from_id, uint8_t* v, size_t* v_size, DataCommMode* data_mode, float* /*ARRAY 4*/ i) { batch_get_shared_field<float* /*ARRAY 4*/, sharedMirror, true>(ctx, &ctx->some_value, from_id, v, v_size, data_mode, i); } void batch_set_mirror_node_some_value_cuda(struct CUDA_Context* ctx, unsigned from_id, uint8_t* v, DataCommMode data_mode) { batch_set_shared_field<float* /*ARRAY 4*/, sharedMirror, setOp>(ctx, &ctx->some_value, from_id, v, data_mode); } void batch_set_node_some_value_cuda(struct CUDA_Context* ctx, unsigned from_id, uint8_t* v, DataCommMode data_mode) { batch_set_shared_field<float* /*ARRAY 4*/, sharedMaster, setOp>(ctx, &ctx->some_value, from_id, v, data_mode); } void batch_add_mirror_node_some_value_cuda(struct CUDA_Context* ctx, unsigned from_id, uint8_t* v, DataCommMode data_mode) { batch_set_shared_field<float* /*ARRAY 4*/, sharedMirror, addOp>(ctx, &ctx->some_value, from_id, v, data_mode); } void batch_add_node_some_value_cuda(struct CUDA_Context* ctx, unsigned from_id, uint8_t* v, DataCommMode data_mode) { batch_set_shared_field<float* /*ARRAY 4*/, sharedMaster, addOp>(ctx, &ctx->some_value, from_id, v, data_mode); } void batch_min_mirror_node_some_value_cuda(struct CUDA_Context* ctx, unsigned from_id, uint8_t* v, DataCommMode data_mode) { batch_set_shared_field<float* /*ARRAY 4*/, sharedMirror, minOp>(ctx, &ctx->some_value, from_id, v, data_mode); } void batch_min_node_some_value_cuda(struct CUDA_Context* ctx, unsigned from_id, uint8_t* v, DataCommMode data_mode) { batch_set_shared_field<float* /*ARRAY 4*/, sharedMaster, minOp>(ctx, &ctx->some_value, from_id, v, data_mode); } void batch_reset_node_some_value_cuda(struct CUDA_Context* ctx, size_t begin, size_t end, float* /*ARRAY 4*/ v) { reset_data_field<float* /*ARRAY 4*/>(&ctx->some_value, begin, end, v); }
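The float* /*ARRAY 4*/ annotations above mean that some_value is a per-node field of four floats, so the host accessors copy or combine four-element slices rather than scalars. Below is a minimal, hypothetical stand-in for such a field; the ArrayField4 struct and its methods are not part of Galois' CUDA_Context and only illustrate the flat numNodes x 4 layout the accessors assume.

// Hedged illustration of a per-node float[4] field stored as one flat device array.
#include <cstdio>
#include <cuda_runtime.h>

struct ArrayField4
{
    float* d_data = nullptr;   // numNodes * 4 floats on the device
    unsigned numNodes = 0;

    void alloc(unsigned n)
    {
        numNodes = n;
        cudaMalloc(&d_data, n * 4 * sizeof(float));
        cudaMemset(d_data, 0, n * 4 * sizeof(float));
    }
    void setNode(unsigned lid, const float v[4])        // element-wise set of one slice
    {
        cudaMemcpy(d_data + lid * 4, v, 4 * sizeof(float), cudaMemcpyHostToDevice);
    }
    void getNode(unsigned lid, float v[4]) const        // read one slice back
    {
        cudaMemcpy(v, d_data + lid * 4, 4 * sizeof(float), cudaMemcpyDeviceToHost);
    }
    void release() { cudaFree(d_data); }
};

int main()
{
    ArrayField4 someValue;
    someValue.alloc(16);

    const float v[4] = { 1.f, 2.f, 3.f, 4.f };
    someValue.setNode(7, v);        // analogous in spirit to set_node_some_value_cuda(ctx, 7, v)

    float out[4];
    someValue.getNode(7, out);
    printf("%g %g %g %g\n", out[0], out[1], out[2], out[3]);

    someValue.release();
    return 0;
}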
254d1a87fdc6f803f33f48f48420e11a25702acf.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <c10/macros/Macros.h> #include <ATen/hip/HIPContext.h> #include <ATen/native/hip/block_reduce.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/CUDAFunctions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/zeros_like.h> #include <ATen/ops/sum_cuda_dispatch.h> #include <ATen/ops/multilabel_margin_loss.h> #endif namespace at::native { namespace { const int MULTILABELMARGIN_THREADS = 128; void check_shape(const Tensor& input, const Tensor& target) { int64_t ndims = input.dim(); bool valid_inputs = (ndims == 2 && input.size(1) != 0) || (ndims == 1 && input.size(0) != 0) || (ndims == 0); TORCH_CHECK( valid_inputs, "Expected non-empty vector or matrix with optional 0-dim batch size, but got: ", input.sizes()); if (ndims <= 1) { int dim = input.dim() == 0 ? 1 : input.size(0); TORCH_CHECK( valid_inputs && target.dim() <= 1 && target.numel() == dim, "inconsistent target size: ", target.sizes(), " for input of size: ", input.sizes()); } else if (ndims == 2) { int nframe = input.size(0); int dim = input.size(1); TORCH_CHECK( valid_inputs && target.dim() == 2 && target.size(0) == nframe && target.size(1) == dim, "inconsistent target size: ", target.sizes(), " for input of size: ", input.sizes()); } else { TORCH_CHECK(false, "Expected input of ndims <= 2, but got ndims: ", ndims); } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(MULTILABELMARGIN_THREADS) __global__ void multilabel_margin_loss_forward_kernel( scalar_t* output, const scalar_t* input, const int64_t* target, scalar_t* is_target, int nframe, int dim, bool size_average) { // vectors: int k = blockIdx.x; const scalar_t* input_k = input + k * dim; const int64_t* target_k = target + k * dim; scalar_t* output_k = output + k; scalar_t* is_target_k = is_target + k * dim; // zero is_target for (int d = threadIdx.x; d < dim; d += blockDim.x) { is_target_k[d] = static_cast<scalar_t>(0); } __syncthreads(); // mark targets in is_target if (threadIdx.x == 0) { for (int dt = 0; dt < dim; dt++) { int target_idx = target_k[dt]; if (target_idx < 0) { break; } is_target_k[target_idx] = static_cast<scalar_t>(1); } } __syncthreads(); // iterate over targets accscalar_t sum = 0; for (int dt = 0; dt < dim; dt++) { // next target: int target_idx = target_k[dt]; if (target_idx < 0) { break; } // current value for target scalar_t input_target_k = input_k[target_idx]; // compare to all inputs (multithreaded): for (int d = threadIdx.x; d < dim; d += blockDim.x) { // contribute to loss only if not a target if (!static_cast<int>(is_target_k[d])) { scalar_t z = 1 - input_target_k + input_k[d]; if (z > 0) { sum += z; } } } } // Temporary sums (for mapreduce) __shared__ accscalar_t smem[MULTILABELMARGIN_THREADS]; accscalar_t total_sum = cuda_utils::BlockReduceSum(sum, smem); if (threadIdx.x == 0) { if (size_average) { *output_k = static_cast<scalar_t>((total_sum / dim) / nframe); } else { *output_k = static_cast<scalar_t>(total_sum / dim); } } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(MULTILABELMARGIN_THREADS) __global__ void multilabel_margin_loss_backward_kernel( scalar_t* grad_input, const scalar_t* grad_output, const scalar_t* input, const int64_t* target, const scalar_t* is_target, int nframe, int dim, bool 
size_average, bool reduce) { int k = blockIdx.x; const scalar_t* input_k = input + k * dim; scalar_t* grad_input_k = grad_input + k * dim; const int64_t* target_k = target + k * dim; const scalar_t* is_target_k = is_target + k * dim; const scalar_t* grad_output_k = grad_output; if (!reduce) { grad_output_k += k; } // gain: scalar_t g = static_cast<scalar_t>( size_average && reduce ? 1. / static_cast<accscalar_t>(nframe * dim) : 1. / static_cast<accscalar_t>(dim)); // zero gradients: for (int d = threadIdx.x; d < dim; d += blockDim.x) { grad_input_k[d] = static_cast<scalar_t>(0); } __syncthreads(); // iterate over targets for (int dt = 0; dt < dim; dt++) { // next target: int target_idx = static_cast<int>(target_k[dt]); if (target_idx < 0) { break; } // current value for target scalar_t input_target_k = input_k[target_idx]; // compare to all inputs (multithreaded): accscalar_t sum = 0; for (int d = threadIdx.x; d < dim; d += blockDim.x) { // contribute to loss only if not a target if (!static_cast<int>(is_target_k[d])) { scalar_t z = 1 - input_target_k + input_k[d]; if (z > 0) { sum -= g; grad_input_k[d] += g; } } } __syncthreads(); // Temporary sums (for mapreduce) __shared__ accscalar_t smem[MULTILABELMARGIN_THREADS]; accscalar_t total_sum = cuda_utils::BlockReduceSum(sum, smem); if (threadIdx.x == 0) { grad_input_k[target_idx] += static_cast<scalar_t>(total_sum); } } for (int d = threadIdx.x; d < dim; d += blockDim.x) { grad_input_k[d] *= *grad_output_k; } } void multilabel_margin_loss_forward_out_cuda_template( const Tensor& input, const Tensor& target, int64_t reduction, Tensor& output, Tensor& is_target) { check_shape(input, target); if (input.numel() == 0) { return; } auto input_ = input.contiguous(); auto target_ = target.contiguous(); auto is_target_ = is_target.contiguous(); is_target_.resize_as_(target); if (input.dim() <= 1) { int dim = input.dim() == 0 ? 
1 : input.size(0); output.resize_({}); dim3 blocks(1); dim3 threads(MULTILABELMARGIN_THREADS); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "multilabel_margin_loss_forward_kernel", [&] { using accscalar_t = at::acc_type<scalar_t, true>; hipLaunchKernelGGL(( multilabel_margin_loss_forward_kernel<scalar_t, accscalar_t>) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output.mutable_data_ptr<scalar_t>(), input_.const_data_ptr<scalar_t>(), target_.const_data_ptr<int64_t>(), is_target_.mutable_data_ptr<scalar_t>(), 1, dim, reduction == at::Reduction::Mean); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } else if (input.dim() == 2) { int nframe = input.size(0); int dim = input.size(1); dim3 blocks(input.size(0)); dim3 threads(MULTILABELMARGIN_THREADS); if (reduction != at::Reduction::None) { auto output_tmp = at::empty({input_.size(0)}, input_.options()); output.resize_({}); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "multilabel_margin_loss_forward_kernel", [&] { using accscalar_t = at::acc_type<scalar_t, true>; hipLaunchKernelGGL(( multilabel_margin_loss_forward_kernel<scalar_t, accscalar_t>) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output_tmp.mutable_data_ptr<scalar_t>(), input_.const_data_ptr<scalar_t>(), target_.const_data_ptr<int64_t>(), is_target_.mutable_data_ptr<scalar_t>(), nframe, dim, reduction == at::Reduction::Mean); C10_HIP_KERNEL_LAUNCH_CHECK(); }); at::cuda::sum_out( output, output_tmp, at::IntArrayRef(std::vector<int64_t>{}), false, output.scalar_type()); } else { output.resize_({input.size(0)}); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "multilabel_margin_loss_forward_kernel", [&] { using accscalar_t = at::acc_type<scalar_t, true>; hipLaunchKernelGGL(( multilabel_margin_loss_forward_kernel<scalar_t, accscalar_t>) , dim3(blocks), dim3(threads), 0, at::hip::getCurrentHIPStreamMasqueradingAsCUDA(), output.mutable_data_ptr<scalar_t>(), input_.const_data_ptr<scalar_t>(), target_.const_data_ptr<int64_t>(), is_target_.mutable_data_ptr<scalar_t>(), nframe, dim, false); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } } else { TORCH_CHECK( false, "Expected 2D input with optional zero batch dim, or 1D input with non-zero dims, but got sizes: ", input.sizes()); } } void multilabel_margin_loss_backward_cuda_out_template( const Tensor& grad_output, const Tensor& input, const Tensor& target, int64_t reduction, const Tensor& is_target, Tensor& grad_input) { check_shape(input, target); auto input_ = input.contiguous(); if (input_.numel() == 0) { return; } grad_input.resize_as_(input_); auto target_ = target.contiguous(); auto is_target_ = is_target.contiguous(); auto grad_output_ = grad_output.contiguous(); if (grad_input.dim() <= 1) { int dim = grad_input.dim() == 0 ? 1 : grad_input.size(0); int target_size = target_.dim() == 0 ? 
1 : target_.size(0); TORCH_CHECK( (target_.numel() != 0) && (target_.dim() <= 1) && (target_size == dim), "inconsistent target size"); TORCH_CHECK( target_.sizes() == is_target_.sizes(), "inconsistent is_target size"); dim3 blocks(1); dim3 threads(MULTILABELMARGIN_THREADS); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "multilabel_margin_loss_backward_kernel", [&] { using accscalar_t = at::acc_type<scalar_t, true>; hipLaunchKernelGGL(( multilabel_margin_loss_backward_kernel<scalar_t, accscalar_t>) , dim3(blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_input.mutable_data_ptr<scalar_t>(), grad_output_.const_data_ptr<scalar_t>(), input_.const_data_ptr<scalar_t>(), target_.const_data_ptr<int64_t>(), is_target_.const_data_ptr<scalar_t>(), 1, dim, reduction == at::Reduction::Mean, reduction != at::Reduction::None); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } else if (grad_input.dim() == 2) { int nframe = grad_input.size(0); int dim = grad_input.size(1); TORCH_CHECK( (input_.size(1) != 0) && (target_.dim() == 2) && (target_.size(0) == nframe) && (target_.size(1) == dim), "inconsistent target size"); TORCH_CHECK(target_.sizes() == is_target_.sizes(), "inconsistent is_target size"); dim3 blocks(grad_input.size(0)); dim3 threads(MULTILABELMARGIN_THREADS); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "multilabel_margin_loss_backward_kernel", [&] { using accscalar_t = at::acc_type<scalar_t, true>; hipLaunchKernelGGL(( multilabel_margin_loss_backward_kernel<scalar_t, accscalar_t>) , dim3(blocks), dim3(threads), 0, c10::hip::getCurrentHIPStreamMasqueradingAsCUDA(), grad_input.mutable_data_ptr<scalar_t>(), grad_output_.const_data_ptr<scalar_t>(), input_.const_data_ptr<scalar_t>(), target_.const_data_ptr<int64_t>(), is_target_.const_data_ptr<scalar_t>(), grad_input.size(0), grad_input.size(1), reduction == at::Reduction::Mean, reduction != at::Reduction::None); C10_HIP_KERNEL_LAUNCH_CHECK(); }); } else { TORCH_CHECK( false, "Expected 2D input with optional zero batch dim, or 1D input with non-zero dims, but got sizes: ", grad_input.sizes()); } } } // namespace std::tuple<Tensor&, Tensor&> multilabel_margin_loss_forward_out_cuda( const Tensor& self, const Tensor& target, int64_t reduction, Tensor& output, Tensor& is_target) { multilabel_margin_loss_forward_out_cuda_template( self, target, reduction, output, is_target); return std::tuple<Tensor&, Tensor&>(output, is_target); } std::tuple<Tensor, Tensor> multilabel_margin_loss_forward_cuda( const Tensor& self, const Tensor& target, int64_t reduction) { auto output = at::empty({0}, self.options()); auto is_target = at::empty({0}, self.options()); multilabel_margin_loss_forward_out_cuda_template( self, target, reduction, output, is_target); return std::make_tuple(output, is_target); } Tensor& multilabel_margin_loss_backward_cuda_out( const Tensor& grad_output, const Tensor& self, const Tensor& target, int64_t reduction, const Tensor& is_target, Tensor& grad_input) { multilabel_margin_loss_backward_cuda_out_template( grad_output, self, target, reduction, is_target, grad_input); return grad_input; } Tensor multilabel_margin_loss_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& target, int64_t reduction, const Tensor& is_target) { auto grad_input = at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); multilabel_margin_loss_backward_cuda_out_template( grad_output, self, target, reduction, 
is_target, grad_input); return grad_input; } } // namespace at::native
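For reference, multilabel_margin_loss_forward_kernel above computes, per sample, the sum of max(0, 1 - x[t] + x[j]) over every (target t, non-target j) pair, divided by the class count (and additionally by the batch size when the reduction is Mean). The host-side checker below restates that formula outside ATen; the function name and the toy inputs are hypothetical, not part of the file.

// A small host-side reference for what the forward kernel computes per sample.
// Targets are class indices terminated by -1, as in the kernel above.
#include <cstdio>
#include <vector>

float multilabelMarginRef(const std::vector<float>& x, const std::vector<long>& target)
{
    const int dim = (int)x.size();
    std::vector<char> isTarget(dim, 0);
    for (long t : target) { if (t < 0) break; isTarget[t] = 1; }

    float sum = 0.f;
    for (long t : target)
    {
        if (t < 0) break;
        for (int j = 0; j < dim; ++j)
            if (!isTarget[j])
            {
                float z = 1.f - x[t] + x[j];    // hinge term for one (target, non-target) pair
                if (z > 0.f) sum += z;
            }
    }
    return sum / dim;   // the kernel also divides by nframe when size_average is set
}

int main()
{
    // One sample, 4 classes, classes 0 and 2 are targets.
    std::vector<float> x      = { 0.8f, 0.1f, 0.6f, -0.2f };
    std::vector<long>  target = { 0, 2, -1, -1 };
    printf("loss = %f\n", multilabelMarginRef(x, target));   // expect 0.25 for this toy input
    return 0;
}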
254d1a87fdc6f803f33f48f48420e11a25702acf.cu
#define TORCH_ASSERT_ONLY_METHOD_OPERATORS #include <ATen/core/Tensor.h> #include <ATen/AccumulateType.h> #include <ATen/Dispatch.h> #include <c10/macros/Macros.h> #include <ATen/cuda/CUDAContext.h> #include <ATen/native/cuda/block_reduce.cuh> #ifndef AT_PER_OPERATOR_HEADERS #include <ATen/Functions.h> #include <ATen/CUDAFunctions.h> #include <ATen/NativeFunctions.h> #else #include <ATen/ops/empty.h> #include <ATen/ops/zeros_like.h> #include <ATen/ops/sum_cuda_dispatch.h> #include <ATen/ops/multilabel_margin_loss.h> #endif namespace at::native { namespace { const int MULTILABELMARGIN_THREADS = 128; void check_shape(const Tensor& input, const Tensor& target) { int64_t ndims = input.dim(); bool valid_inputs = (ndims == 2 && input.size(1) != 0) || (ndims == 1 && input.size(0) != 0) || (ndims == 0); TORCH_CHECK( valid_inputs, "Expected non-empty vector or matrix with optional 0-dim batch size, but got: ", input.sizes()); if (ndims <= 1) { int dim = input.dim() == 0 ? 1 : input.size(0); TORCH_CHECK( valid_inputs && target.dim() <= 1 && target.numel() == dim, "inconsistent target size: ", target.sizes(), " for input of size: ", input.sizes()); } else if (ndims == 2) { int nframe = input.size(0); int dim = input.size(1); TORCH_CHECK( valid_inputs && target.dim() == 2 && target.size(0) == nframe && target.size(1) == dim, "inconsistent target size: ", target.sizes(), " for input of size: ", input.sizes()); } else { TORCH_CHECK(false, "Expected input of ndims <= 2, but got ndims: ", ndims); } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(MULTILABELMARGIN_THREADS) __global__ void multilabel_margin_loss_forward_kernel( scalar_t* output, const scalar_t* input, const int64_t* target, scalar_t* is_target, int nframe, int dim, bool size_average) { // vectors: int k = blockIdx.x; const scalar_t* input_k = input + k * dim; const int64_t* target_k = target + k * dim; scalar_t* output_k = output + k; scalar_t* is_target_k = is_target + k * dim; // zero is_target for (int d = threadIdx.x; d < dim; d += blockDim.x) { is_target_k[d] = static_cast<scalar_t>(0); } __syncthreads(); // mark targets in is_target if (threadIdx.x == 0) { for (int dt = 0; dt < dim; dt++) { int target_idx = target_k[dt]; if (target_idx < 0) { break; } is_target_k[target_idx] = static_cast<scalar_t>(1); } } __syncthreads(); // iterate over targets accscalar_t sum = 0; for (int dt = 0; dt < dim; dt++) { // next target: int target_idx = target_k[dt]; if (target_idx < 0) { break; } // current value for target scalar_t input_target_k = input_k[target_idx]; // compare to all inputs (multithreaded): for (int d = threadIdx.x; d < dim; d += blockDim.x) { // contribute to loss only if not a target if (!static_cast<int>(is_target_k[d])) { scalar_t z = 1 - input_target_k + input_k[d]; if (z > 0) { sum += z; } } } } // Temporary sums (for mapreduce) __shared__ accscalar_t smem[MULTILABELMARGIN_THREADS]; accscalar_t total_sum = cuda_utils::BlockReduceSum(sum, smem); if (threadIdx.x == 0) { if (size_average) { *output_k = static_cast<scalar_t>((total_sum / dim) / nframe); } else { *output_k = static_cast<scalar_t>(total_sum / dim); } } } template <typename scalar_t, typename accscalar_t> C10_LAUNCH_BOUNDS_1(MULTILABELMARGIN_THREADS) __global__ void multilabel_margin_loss_backward_kernel( scalar_t* grad_input, const scalar_t* grad_output, const scalar_t* input, const int64_t* target, const scalar_t* is_target, int nframe, int dim, bool size_average, bool reduce) { int k = blockIdx.x; const scalar_t* input_k = input + k * dim; 
scalar_t* grad_input_k = grad_input + k * dim; const int64_t* target_k = target + k * dim; const scalar_t* is_target_k = is_target + k * dim; const scalar_t* grad_output_k = grad_output; if (!reduce) { grad_output_k += k; } // gain: scalar_t g = static_cast<scalar_t>( size_average && reduce ? 1. / static_cast<accscalar_t>(nframe * dim) : 1. / static_cast<accscalar_t>(dim)); // zero gradients: for (int d = threadIdx.x; d < dim; d += blockDim.x) { grad_input_k[d] = static_cast<scalar_t>(0); } __syncthreads(); // iterate over targets for (int dt = 0; dt < dim; dt++) { // next target: int target_idx = static_cast<int>(target_k[dt]); if (target_idx < 0) { break; } // current value for target scalar_t input_target_k = input_k[target_idx]; // compare to all inputs (multithreaded): accscalar_t sum = 0; for (int d = threadIdx.x; d < dim; d += blockDim.x) { // contribute to loss only if not a target if (!static_cast<int>(is_target_k[d])) { scalar_t z = 1 - input_target_k + input_k[d]; if (z > 0) { sum -= g; grad_input_k[d] += g; } } } __syncthreads(); // Temporary sums (for mapreduce) __shared__ accscalar_t smem[MULTILABELMARGIN_THREADS]; accscalar_t total_sum = cuda_utils::BlockReduceSum(sum, smem); if (threadIdx.x == 0) { grad_input_k[target_idx] += static_cast<scalar_t>(total_sum); } } for (int d = threadIdx.x; d < dim; d += blockDim.x) { grad_input_k[d] *= *grad_output_k; } } void multilabel_margin_loss_forward_out_cuda_template( const Tensor& input, const Tensor& target, int64_t reduction, Tensor& output, Tensor& is_target) { check_shape(input, target); if (input.numel() == 0) { return; } auto input_ = input.contiguous(); auto target_ = target.contiguous(); auto is_target_ = is_target.contiguous(); is_target_.resize_as_(target); if (input.dim() <= 1) { int dim = input.dim() == 0 ? 
1 : input.size(0); output.resize_({}); dim3 blocks(1); dim3 threads(MULTILABELMARGIN_THREADS); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "multilabel_margin_loss_forward_kernel", [&] { using accscalar_t = at::acc_type<scalar_t, true>; multilabel_margin_loss_forward_kernel<scalar_t, accscalar_t> <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( output.mutable_data_ptr<scalar_t>(), input_.const_data_ptr<scalar_t>(), target_.const_data_ptr<int64_t>(), is_target_.mutable_data_ptr<scalar_t>(), 1, dim, reduction == at::Reduction::Mean); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } else if (input.dim() == 2) { int nframe = input.size(0); int dim = input.size(1); dim3 blocks(input.size(0)); dim3 threads(MULTILABELMARGIN_THREADS); if (reduction != at::Reduction::None) { auto output_tmp = at::empty({input_.size(0)}, input_.options()); output.resize_({}); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "multilabel_margin_loss_forward_kernel", [&] { using accscalar_t = at::acc_type<scalar_t, true>; multilabel_margin_loss_forward_kernel<scalar_t, accscalar_t> <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( output_tmp.mutable_data_ptr<scalar_t>(), input_.const_data_ptr<scalar_t>(), target_.const_data_ptr<int64_t>(), is_target_.mutable_data_ptr<scalar_t>(), nframe, dim, reduction == at::Reduction::Mean); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); at::cuda::sum_out( output, output_tmp, at::IntArrayRef(std::vector<int64_t>{}), false, output.scalar_type()); } else { output.resize_({input.size(0)}); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "multilabel_margin_loss_forward_kernel", [&] { using accscalar_t = at::acc_type<scalar_t, true>; multilabel_margin_loss_forward_kernel<scalar_t, accscalar_t> <<<blocks, threads, 0, at::cuda::getCurrentCUDAStream()>>>( output.mutable_data_ptr<scalar_t>(), input_.const_data_ptr<scalar_t>(), target_.const_data_ptr<int64_t>(), is_target_.mutable_data_ptr<scalar_t>(), nframe, dim, false); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } } else { TORCH_CHECK( false, "Expected 2D input with optional zero batch dim, or 1D input with non-zero dims, but got sizes: ", input.sizes()); } } void multilabel_margin_loss_backward_cuda_out_template( const Tensor& grad_output, const Tensor& input, const Tensor& target, int64_t reduction, const Tensor& is_target, Tensor& grad_input) { check_shape(input, target); auto input_ = input.contiguous(); if (input_.numel() == 0) { return; } grad_input.resize_as_(input_); auto target_ = target.contiguous(); auto is_target_ = is_target.contiguous(); auto grad_output_ = grad_output.contiguous(); if (grad_input.dim() <= 1) { int dim = grad_input.dim() == 0 ? 1 : grad_input.size(0); int target_size = target_.dim() == 0 ? 
1 : target_.size(0); TORCH_CHECK( (target_.numel() != 0) && (target_.dim() <= 1) && (target_size == dim), "inconsistent target size"); TORCH_CHECK( target_.sizes() == is_target_.sizes(), "inconsistent is_target size"); dim3 blocks(1); dim3 threads(MULTILABELMARGIN_THREADS); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "multilabel_margin_loss_backward_kernel", [&] { using accscalar_t = at::acc_type<scalar_t, true>; multilabel_margin_loss_backward_kernel<scalar_t, accscalar_t> <<<blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>( grad_input.mutable_data_ptr<scalar_t>(), grad_output_.const_data_ptr<scalar_t>(), input_.const_data_ptr<scalar_t>(), target_.const_data_ptr<int64_t>(), is_target_.const_data_ptr<scalar_t>(), 1, dim, reduction == at::Reduction::Mean, reduction != at::Reduction::None); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } else if (grad_input.dim() == 2) { int nframe = grad_input.size(0); int dim = grad_input.size(1); TORCH_CHECK( (input_.size(1) != 0) && (target_.dim() == 2) && (target_.size(0) == nframe) && (target_.size(1) == dim), "inconsistent target size"); TORCH_CHECK(target_.sizes() == is_target_.sizes(), "inconsistent is_target size"); dim3 blocks(grad_input.size(0)); dim3 threads(MULTILABELMARGIN_THREADS); AT_DISPATCH_FLOATING_TYPES_AND2( at::ScalarType::Half, at::ScalarType::BFloat16, input.scalar_type(), "multilabel_margin_loss_backward_kernel", [&] { using accscalar_t = at::acc_type<scalar_t, true>; multilabel_margin_loss_backward_kernel<scalar_t, accscalar_t> <<<blocks, threads, 0, c10::cuda::getCurrentCUDAStream()>>>( grad_input.mutable_data_ptr<scalar_t>(), grad_output_.const_data_ptr<scalar_t>(), input_.const_data_ptr<scalar_t>(), target_.const_data_ptr<int64_t>(), is_target_.const_data_ptr<scalar_t>(), grad_input.size(0), grad_input.size(1), reduction == at::Reduction::Mean, reduction != at::Reduction::None); C10_CUDA_KERNEL_LAUNCH_CHECK(); }); } else { TORCH_CHECK( false, "Expected 2D input with optional zero batch dim, or 1D input with non-zero dims, but got sizes: ", grad_input.sizes()); } } } // namespace std::tuple<Tensor&, Tensor&> multilabel_margin_loss_forward_out_cuda( const Tensor& self, const Tensor& target, int64_t reduction, Tensor& output, Tensor& is_target) { multilabel_margin_loss_forward_out_cuda_template( self, target, reduction, output, is_target); return std::tuple<Tensor&, Tensor&>(output, is_target); } std::tuple<Tensor, Tensor> multilabel_margin_loss_forward_cuda( const Tensor& self, const Tensor& target, int64_t reduction) { auto output = at::empty({0}, self.options()); auto is_target = at::empty({0}, self.options()); multilabel_margin_loss_forward_out_cuda_template( self, target, reduction, output, is_target); return std::make_tuple(output, is_target); } Tensor& multilabel_margin_loss_backward_cuda_out( const Tensor& grad_output, const Tensor& self, const Tensor& target, int64_t reduction, const Tensor& is_target, Tensor& grad_input) { multilabel_margin_loss_backward_cuda_out_template( grad_output, self, target, reduction, is_target, grad_input); return grad_input; } Tensor multilabel_margin_loss_backward_cuda( const Tensor& grad_output, const Tensor& self, const Tensor& target, int64_t reduction, const Tensor& is_target) { auto grad_input = at::zeros_like(self, LEGACY_CONTIGUOUS_MEMORY_FORMAT); multilabel_margin_loss_backward_cuda_out_template( grad_output, self, target, reduction, is_target, grad_input); return grad_input; } } // namespace at::native
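A minimal standalone sketch of the block-wide sum that the forward and backward kernels above delegate to cuda_utils::BlockReduceSum. The helper below is not ATen's implementation, only an illustrative stand-in with the same shape (one partial value in per thread, one total out per block); the names block_reduce_sum, block_sum_kernel and THREADS are made up for the sketch, and it assumes the block is launched with a power-of-two thread count:

constexpr int THREADS = 128;

template <typename T>
__device__ T block_reduce_sum(T val, T* smem) {
  int tid = threadIdx.x;
  smem[tid] = val;
  __syncthreads();
  // Tree reduction in shared memory; requires blockDim.x to be a power of two.
  for (int offset = blockDim.x / 2; offset > 0; offset >>= 1) {
    if (tid < offset) smem[tid] += smem[tid + offset];
    __syncthreads();
  }
  return smem[0];
}

// Example use: each thread accumulates a strided slice, then the block reduces.
__global__ void block_sum_kernel(const float* in, float* out, int n) {
  __shared__ float smem[THREADS];
  float partial = 0.f;
  for (int i = threadIdx.x; i < n; i += blockDim.x) partial += in[i];
  float total = block_reduce_sum(partial, smem);
  if (threadIdx.x == 0) *out = total;
}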
d9daf40f9e1823672c44070d3a8b84d62b03d73e.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <vector> #include "caffe/layers/crop_layer.hpp" #include <iostream> using namespace std; namespace caffe { // Copy (one line per thread) from one array to another, with arbitrary // strides in the last two dimensions. template <typename Dtype> __global__ void copy_kernel(const int n, const int height, const int width, const int src_outer_stride, const int src_inner_stride, const int dest_outer_stride, const int dest_inner_stride, const Dtype* src, Dtype* dest) { CUDA_KERNEL_LOOP(index, n) { int src_start = index / height * src_outer_stride + index % height * src_inner_stride; int dest_start = index / height * dest_outer_stride + index % height * dest_inner_stride; for (int i = 0; i < width; ++i) { dest[dest_start + i] = src[src_start + i]; } } } // recursive copy function, this function is similar to crop_copy but loops // over all but the last two dimensions. It is implemented this way to allow // for ND cropping while still relying on a CUDA kernel for the innermost // two dimensions for performance reasons. // An alternative way to implement ND cropping relying more on the kernel // would require passing offsets to the kernel, which is a bit problematic // because it is of variable length. Since in the standard (N,C,W,H) case // N,C are usually not cropped a speedup could be achieved by not looping // the application of the copy_kernel around these dimensions. template <typename Dtype> void CropLayer<Dtype>::crop_copy_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top, const vector<int>& offsets, vector<int> indices, int cur_dim, const Dtype* src_data, Dtype* dest_data, bool is_forward) { if (cur_dim + 2 < top[0]->num_axes()) { // We are not yet at the final dimension, call copy recursivley for (int i = 0; i < top[0]->shape(cur_dim); ++i) { indices[cur_dim] = i; crop_copy_gpu(bottom, top, offsets, indices, cur_dim+1, src_data, dest_data, is_forward); } } else { // We are at the last two dimensions, which are stored continously in memory // With (N,C,H,W) // (0,1,2,3) cur_dim -> H // cur_dim+1 -> W const int lines = top[0]->shape(cur_dim); const int height = top[0]->shape(cur_dim); const int width = top[0]->shape(cur_dim+1); std::vector<int> ind_off(cur_dim+2, 0); for (int j = 0; j < cur_dim; ++j) { ind_off[j] = indices[j] + offsets[j]; } ind_off[cur_dim] = offsets[cur_dim]; ind_off[cur_dim+1] = offsets[cur_dim+1]; // Compute copy strides const int src_outer_stride = bottom[0]->shape(cur_dim)*bottom[0]->shape(cur_dim+1); const int src_inner_stride = bottom[0]->shape(cur_dim+1); const int dest_outer_stride = top[0]->shape(cur_dim)*top[0]->shape(cur_dim+1); const int dest_inner_stride = top[0]->shape(cur_dim+1); if (is_forward) { const Dtype* bottom_data = bottom[0]->gpu_data() + bottom[0]->offset(ind_off); Dtype* top_data = top[0]->mutable_gpu_data() + top[0]->offset(indices); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( copy_kernel), dim3(CAFFE_GET_BLOCKS(lines)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, lines, height, width, src_outer_stride, src_inner_stride, dest_outer_stride, dest_inner_stride, bottom_data, top_data); } else { const Dtype* top_diff = top[0]->gpu_diff() + top[0]->offset(indices); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff() + bottom[0]->offset(ind_off); // NOLINT_NEXT_LINE(whitespace/operators) hipLaunchKernelGGL(( copy_kernel), dim3(CAFFE_GET_BLOCKS(lines)), dim3(CAFFE_CUDA_NUM_THREADS), 0, 0, lines, height, width, 
dest_outer_stride, dest_inner_stride, src_outer_stride, src_inner_stride, top_diff, bottom_diff); } } } template <typename Dtype> void CropLayer<Dtype>::Forward_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { std::vector<int> indices(top[0]->num_axes(), 0); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); crop_copy_gpu(bottom, top, offsets, indices, 0, bottom_data, top_data, true); } template <typename Dtype> void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); if (propagate_down[0]) { caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff); std::vector<int> indices(top[0]->num_axes(), 0); crop_copy_gpu(bottom, top, offsets, indices, 0, top_diff, bottom_diff, false); } } INSTANTIATE_LAYER_GPU_FUNCS(CropLayer); } // namespace caffe
d9daf40f9e1823672c44070d3a8b84d62b03d73e.cu
#include <vector> #include "caffe/layers/crop_layer.hpp" #include <iostream> using namespace std; namespace caffe { // Copy (one line per thread) from one array to another, with arbitrary // strides in the last two dimensions. template <typename Dtype> __global__ void copy_kernel(const int n, const int height, const int width, const int src_outer_stride, const int src_inner_stride, const int dest_outer_stride, const int dest_inner_stride, const Dtype* src, Dtype* dest) { CUDA_KERNEL_LOOP(index, n) { int src_start = index / height * src_outer_stride + index % height * src_inner_stride; int dest_start = index / height * dest_outer_stride + index % height * dest_inner_stride; for (int i = 0; i < width; ++i) { dest[dest_start + i] = src[src_start + i]; } } } // recursive copy function, this function is similar to crop_copy but loops // over all but the last two dimensions. It is implemented this way to allow // for ND cropping while still relying on a CUDA kernel for the innermost // two dimensions for performance reasons. // An alternative way to implement ND cropping relying more on the kernel // would require passing offsets to the kernel, which is a bit problematic // because it is of variable length. Since in the standard (N,C,W,H) case // N,C are usually not cropped a speedup could be achieved by not looping // the application of the copy_kernel around these dimensions. template <typename Dtype> void CropLayer<Dtype>::crop_copy_gpu(const vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top, const vector<int>& offsets, vector<int> indices, int cur_dim, const Dtype* src_data, Dtype* dest_data, bool is_forward) { if (cur_dim + 2 < top[0]->num_axes()) { // We are not yet at the final dimension, call copy recursivley for (int i = 0; i < top[0]->shape(cur_dim); ++i) { indices[cur_dim] = i; crop_copy_gpu(bottom, top, offsets, indices, cur_dim+1, src_data, dest_data, is_forward); } } else { // We are at the last two dimensions, which are stored continously in memory // With (N,C,H,W) // (0,1,2,3) cur_dim -> H // cur_dim+1 -> W const int lines = top[0]->shape(cur_dim); const int height = top[0]->shape(cur_dim); const int width = top[0]->shape(cur_dim+1); std::vector<int> ind_off(cur_dim+2, 0); for (int j = 0; j < cur_dim; ++j) { ind_off[j] = indices[j] + offsets[j]; } ind_off[cur_dim] = offsets[cur_dim]; ind_off[cur_dim+1] = offsets[cur_dim+1]; // Compute copy strides const int src_outer_stride = bottom[0]->shape(cur_dim)*bottom[0]->shape(cur_dim+1); const int src_inner_stride = bottom[0]->shape(cur_dim+1); const int dest_outer_stride = top[0]->shape(cur_dim)*top[0]->shape(cur_dim+1); const int dest_inner_stride = top[0]->shape(cur_dim+1); if (is_forward) { const Dtype* bottom_data = bottom[0]->gpu_data() + bottom[0]->offset(ind_off); Dtype* top_data = top[0]->mutable_gpu_data() + top[0]->offset(indices); // NOLINT_NEXT_LINE(whitespace/operators) copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>( lines, height, width, src_outer_stride, src_inner_stride, dest_outer_stride, dest_inner_stride, bottom_data, top_data); } else { const Dtype* top_diff = top[0]->gpu_diff() + top[0]->offset(indices); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff() + bottom[0]->offset(ind_off); // NOLINT_NEXT_LINE(whitespace/operators) copy_kernel<<<CAFFE_GET_BLOCKS(lines), CAFFE_CUDA_NUM_THREADS>>>( lines, height, width, dest_outer_stride, dest_inner_stride, src_outer_stride, src_inner_stride, top_diff, bottom_diff); } } } template <typename Dtype> void CropLayer<Dtype>::Forward_gpu(const 
vector<Blob<Dtype>*>& bottom, const vector<Blob<Dtype>*>& top) { std::vector<int> indices(top[0]->num_axes(), 0); const Dtype* bottom_data = bottom[0]->gpu_data(); Dtype* top_data = top[0]->mutable_gpu_data(); crop_copy_gpu(bottom, top, offsets, indices, 0, bottom_data, top_data, true); } template <typename Dtype> void CropLayer<Dtype>::Backward_gpu(const vector<Blob<Dtype>*>& top, const vector<bool>& propagate_down, const vector<Blob<Dtype>*>& bottom) { const Dtype* top_diff = top[0]->gpu_diff(); Dtype* bottom_diff = bottom[0]->mutable_gpu_diff(); if (propagate_down[0]) { caffe_gpu_set(bottom[0]->count(), static_cast<Dtype>(0), bottom_diff); std::vector<int> indices(top[0]->num_axes(), 0); crop_copy_gpu(bottom, top, offsets, indices, 0, top_diff, bottom_diff, false); } } INSTANTIATE_LAYER_GPU_FUNCS(CropLayer); } // namespace caffe
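The copy_kernel above leans on two Caffe conventions: CUDA_KERNEL_LOOP, Caffe's grid-stride loop macro (it expands to roughly for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < (n); i += blockDim.x * gridDim.x)), and outer/inner stride addressing over the last two dimensions. A dependency-free sketch of the same copy with the macro written out; strided_window_copy is an illustrative name, not part of Caffe:

__global__ void strided_window_copy(int lines, int height, int width,
                                    int src_outer_stride, int src_inner_stride,
                                    int dst_outer_stride, int dst_inner_stride,
                                    const float* src, float* dst) {
  for (int index = blockIdx.x * blockDim.x + threadIdx.x; index < lines;
       index += blockDim.x * gridDim.x) {
    // index / height selects the slab, index % height the row inside it;
    // each thread then copies one contiguous row of `width` elements.
    int src_start = index / height * src_outer_stride
                  + index % height * src_inner_stride;
    int dst_start = index / height * dst_outer_stride
                  + index % height * dst_inner_stride;
    for (int i = 0; i < width; ++i)
      dst[dst_start + i] = src[src_start + i];
  }
}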
a5750035d6ac0ff5065959231e0af99b693e364e.hip
// !!! This is a file automatically generated by hipify!!! /* * 713_Assignment 2 * In the assignment, I will implement a Automatic Contrast Enhancement algorithm with Parallel Reduction on CUDA. * * Algortihm and strategies are my own. * This file contains the CUDA version of the algorithm. */ #include <iostream> #include <fstream> #include <sstream> #include <stdio.h> #include <stdlib.h> #include "hip/hip_runtime.h" #include "device_launch_parameters.h" #include "npp.h" #include <windows.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/extrema.h> #include <thrust/functional.h> //global variables for and function declerations for performance measurements double PCFreq = 0.0; __int64 CounterStart = 0; void StartCounter(); double GetCounter(); // Function declarations. thrust::host_vector<Npp8u> LoadPGM(char * sFileName, int & nWidth, int & nHeight, int & nMaxGray); void WritePGM(char * sFileName, thrust::host_vector<Npp8u> pDst_Host, int nWidth, int nHeight, int nMaxGray); // Main function. int main(int argc, char ** argv) { // Host parameter declarations. int nWidth, nHeight, nMaxGray; std::cout << "####### THRUST VERSION #######" << std::endl; // Load image to the host. std::cout << "Load PGM file." << std::endl; thrust::host_vector<Npp8u> vecHost = LoadPGM("..\\input\\lena_before.pgm", nWidth, nHeight, nMaxGray); // Device parameter declarations. Npp8u nMin, nMax; // Copy the image from the host to GPU thrust::device_vector<Npp8u> vecDev = vecHost; std::cout << "Copy image from host to device." << std::endl; std::cout << "Process the image on GPU." << std::endl; //start counter for performance mesaurements StartCounter(); // Compute the min and the max. nMin = thrust::reduce(vecDev.begin(), vecDev.end(), nMaxGray, thrust::minimum<int>()); nMax = thrust::reduce(vecDev.begin(), vecDev.end(), 0, thrust::maximum<int>()); std::cout << "Duration after MinMax: " << GetCounter() << " microseconds" << std::endl; // Compute the optimal nConstant and nScaleFactor for integer operation see GTC 2013 Lab NPP.pptx for explanation // I will prefer integer arithmetic, Instead of using 255.0f / (nMax - nMin) directly int nScaleFactor = 0; int nPower = 1; while (nPower * 255.0f / (nMax - nMin) < 255.0f) { nScaleFactor++; nPower *= 2; } float nConstant = 255.0f / (nMax - nMin) * (nPower / 2); // Calculate nMultiplier by multiplying nConstant and divide by divider = 2 ^ (nScaleFactor-1) int nDivider = 1; for (int j = 0; j < nScaleFactor - 1; j++) nDivider <<= 1; float nMultiplier = nConstant / nDivider; // Subtract nMin and multiply by nMultiplier thrust::for_each(vecDev.begin(), vecDev.end(), thrust::placeholders::_1 = (thrust::placeholders::_1 - nMin) * nMultiplier); std::cout << "Duration of THRUST Run: " << GetCounter() << " microseconds" << std::endl; // Copy result back to the host. std::cout << "Work done! Copy the result back to host." << std::endl; vecHost = vecDev; // Output the result image. std::cout << "Output the PGM file." << std::endl; WritePGM("..\\output\\lena_after_THRUST.pgm", vecHost, nWidth, nHeight, nMaxGray); return 0; } // Disable reporting warnings on functions that were marked with deprecated. #pragma warning( disable : 4996 ) // Load PGM file. 
thrust::host_vector<Npp8u> LoadPGM(char * sFileName, int & nWidth, int & nHeight, int & nMaxGray) { char aLine[256]; FILE * fInput = fopen(sFileName, "r"); if (fInput == 0) { perror("Cannot open file to read"); exit(EXIT_FAILURE); } // First line: version fgets(aLine, 256, fInput); std::cout << "\tVersion: " << aLine; // Second line: comment fgets(aLine, 256, fInput); std::cout << "\tComment: " << aLine; fseek(fInput, -1, SEEK_CUR); // Third line: size fscanf(fInput, "%d", &nWidth); std::cout << "\tWidth: " << nWidth; fscanf(fInput, "%d", &nHeight); std::cout << " Height: " << nHeight << std::endl; // Fourth line: max value fscanf(fInput, "%d", &nMaxGray); std::cout << "\tMax value: " << nMaxGray << std::endl; while (getc(fInput) != '\n'); // Following lines: data thrust::host_vector<Npp8u> vecHost(nWidth * nHeight); for (int i = 0; i < nHeight; ++i) for (int j = 0; j < nWidth; ++j) vecHost[i*nWidth + j] = fgetc(fInput); fclose(fInput); return vecHost; } // Write PGM image. void WritePGM(char * sFileName, thrust::host_vector<Npp8u> vecHost, int nWidth, int nHeight, int nMaxGray) { FILE * fOutput = fopen(sFileName, "wb"); if (fOutput == 0) { perror("Cannot open file to read"); exit(EXIT_FAILURE); } char * aComment = "# Created by NPP"; fprintf(fOutput, "P5\n%s\n%d %d\n%d\n", aComment, nWidth, nHeight, nMaxGray); for (int i = 0; i < nHeight; ++i) for (int j = 0; j < nWidth; ++j) fputc(vecHost[i*nWidth + j], fOutput); fclose(fOutput); } void StartCounter() { LARGE_INTEGER li; if (!QueryPerformanceFrequency(&li)) std::cout << "QueryPerformanceFrequency failed!\n"; PCFreq = double(li.QuadPart) / 1000000.0; QueryPerformanceCounter(&li); CounterStart = li.QuadPart; } double GetCounter() { LARGE_INTEGER li; QueryPerformanceCounter(&li); return double(li.QuadPart - CounterStart) / PCFreq; }
a5750035d6ac0ff5065959231e0af99b693e364e.cu
/* * 713_Assignment 2 * In the assignment, I will implement a Automatic Contrast Enhancement algorithm with Parallel Reduction on CUDA. * * Algortihm and strategies are my own. * This file contains the CUDA version of the algorithm. */ #include <iostream> #include <fstream> #include <sstream> #include <stdio.h> #include <stdlib.h> #include "cuda_runtime.h" #include "device_launch_parameters.h" #include "npp.h" #include <windows.h> #include <thrust/host_vector.h> #include <thrust/device_vector.h> #include <thrust/extrema.h> #include <thrust/functional.h> //global variables for and function declerations for performance measurements double PCFreq = 0.0; __int64 CounterStart = 0; void StartCounter(); double GetCounter(); // Function declarations. thrust::host_vector<Npp8u> LoadPGM(char * sFileName, int & nWidth, int & nHeight, int & nMaxGray); void WritePGM(char * sFileName, thrust::host_vector<Npp8u> pDst_Host, int nWidth, int nHeight, int nMaxGray); // Main function. int main(int argc, char ** argv) { // Host parameter declarations. int nWidth, nHeight, nMaxGray; std::cout << "####### THRUST VERSION #######" << std::endl; // Load image to the host. std::cout << "Load PGM file." << std::endl; thrust::host_vector<Npp8u> vecHost = LoadPGM("..\\input\\lena_before.pgm", nWidth, nHeight, nMaxGray); // Device parameter declarations. Npp8u nMin, nMax; // Copy the image from the host to GPU thrust::device_vector<Npp8u> vecDev = vecHost; std::cout << "Copy image from host to device." << std::endl; std::cout << "Process the image on GPU." << std::endl; //start counter for performance mesaurements StartCounter(); // Compute the min and the max. nMin = thrust::reduce(vecDev.begin(), vecDev.end(), nMaxGray, thrust::minimum<int>()); nMax = thrust::reduce(vecDev.begin(), vecDev.end(), 0, thrust::maximum<int>()); std::cout << "Duration after MinMax: " << GetCounter() << " microseconds" << std::endl; // Compute the optimal nConstant and nScaleFactor for integer operation see GTC 2013 Lab NPP.pptx for explanation // I will prefer integer arithmetic, Instead of using 255.0f / (nMax - nMin) directly int nScaleFactor = 0; int nPower = 1; while (nPower * 255.0f / (nMax - nMin) < 255.0f) { nScaleFactor++; nPower *= 2; } float nConstant = 255.0f / (nMax - nMin) * (nPower / 2); // Calculate nMultiplier by multiplying nConstant and divide by divider = 2 ^ (nScaleFactor-1) int nDivider = 1; for (int j = 0; j < nScaleFactor - 1; j++) nDivider <<= 1; float nMultiplier = nConstant / nDivider; // Subtract nMin and multiply by nMultiplier thrust::for_each(vecDev.begin(), vecDev.end(), thrust::placeholders::_1 = (thrust::placeholders::_1 - nMin) * nMultiplier); std::cout << "Duration of THRUST Run: " << GetCounter() << " microseconds" << std::endl; // Copy result back to the host. std::cout << "Work done! Copy the result back to host." << std::endl; vecHost = vecDev; // Output the result image. std::cout << "Output the PGM file." << std::endl; WritePGM("..\\output\\lena_after_THRUST.pgm", vecHost, nWidth, nHeight, nMaxGray); return 0; } // Disable reporting warnings on functions that were marked with deprecated. #pragma warning( disable : 4996 ) // Load PGM file. 
thrust::host_vector<Npp8u> LoadPGM(char * sFileName, int & nWidth, int & nHeight, int & nMaxGray) { char aLine[256]; FILE * fInput = fopen(sFileName, "r"); if (fInput == 0) { perror("Cannot open file to read"); exit(EXIT_FAILURE); } // First line: version fgets(aLine, 256, fInput); std::cout << "\tVersion: " << aLine; // Second line: comment fgets(aLine, 256, fInput); std::cout << "\tComment: " << aLine; fseek(fInput, -1, SEEK_CUR); // Third line: size fscanf(fInput, "%d", &nWidth); std::cout << "\tWidth: " << nWidth; fscanf(fInput, "%d", &nHeight); std::cout << " Height: " << nHeight << std::endl; // Fourth line: max value fscanf(fInput, "%d", &nMaxGray); std::cout << "\tMax value: " << nMaxGray << std::endl; while (getc(fInput) != '\n'); // Following lines: data thrust::host_vector<Npp8u> vecHost(nWidth * nHeight); for (int i = 0; i < nHeight; ++i) for (int j = 0; j < nWidth; ++j) vecHost[i*nWidth + j] = fgetc(fInput); fclose(fInput); return vecHost; } // Write PGM image. void WritePGM(char * sFileName, thrust::host_vector<Npp8u> vecHost, int nWidth, int nHeight, int nMaxGray) { FILE * fOutput = fopen(sFileName, "wb"); if (fOutput == 0) { perror("Cannot open file to read"); exit(EXIT_FAILURE); } char * aComment = "# Created by NPP"; fprintf(fOutput, "P5\n%s\n%d %d\n%d\n", aComment, nWidth, nHeight, nMaxGray); for (int i = 0; i < nHeight; ++i) for (int j = 0; j < nWidth; ++j) fputc(vecHost[i*nWidth + j], fOutput); fclose(fOutput); } void StartCounter() { LARGE_INTEGER li; if (!QueryPerformanceFrequency(&li)) std::cout << "QueryPerformanceFrequency failed!\n"; PCFreq = double(li.QuadPart) / 1000000.0; QueryPerformanceCounter(&li); CounterStart = li.QuadPart; } double GetCounter() { LARGE_INTEGER li; QueryPerformanceCounter(&li); return double(li.QuadPart - CounterStart) / PCFreq; }
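The two thrust::reduce passes above compute the minimum and the maximum separately. thrust::minmax_element (already pulled in through <thrust/extrema.h>) returns both in a single pass over the device data; a small self-contained sketch, with unsigned char standing in for Npp8u:

#include <thrust/device_vector.h>
#include <thrust/extrema.h>
#include <cstdio>

int main() {
  thrust::device_vector<unsigned char> vecDev(4);
  vecDev[0] = 40; vecDev[1] = 200; vecDev[2] = 17; vecDev[3] = 99;
  // minmax_element returns a pair of iterators to the smallest and largest element.
  auto mm = thrust::minmax_element(vecDev.begin(), vecDev.end());
  unsigned char nMin = *mm.first;   // 17
  unsigned char nMax = *mm.second;  // 200
  printf("min=%d max=%d\n", (int)nMin, (int)nMax);
  return 0;
}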
dce45070e20000199d4865bf089eafcec11eac06.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zlascl2.cu normal z -> c, Fri Jan 30 19:00:09 2015 @author Theo Mary */ #include "common_magma.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right. __global__ void clascl2_full(int m, int n, const float* D, magmaFloatComplex* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; float mul = D[ind]; A += ind; if (ind < m) { for(int j=0; j < n; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void clascl2_lower(int m, int n, const float* D, magmaFloatComplex* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; int break_d = (ind < n) ? ind : n-1; float mul = D[ind]; A += ind; if (ind < m) { for(int j=0; j <= break_d; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void clascl2_upper(int m, int n, const float *D, magmaFloatComplex* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; float mul = D[ind]; A += ind; if (ind < m) { for(int j=n-1; j >= ind; j--) A[j*lda] *= mul; } } /** Purpose ------- CLASCL2 scales the M by N complex matrix A by the real diagonal matrix dD. TYPE specifies that A may be full, upper triangular, lower triangular. Arguments --------- \param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaFull: full matrix. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. \param[in] m INTEGER The number of rows of the matrix A. M >= 0. \param[in] n INTEGER The number of columns of the matrix A. N >= 0. \param[in] dD REAL vector, dimension (M) The diagonal matrix containing the scalar factors. Stored as a vector. \param[in,out] dA COMPLEX array, dimension (LDDA,N) The matrix to be scaled by dD. See TYPE for the storage type. \param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). \param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_clascl2_q( magma_type_t type, magma_int_t m, magma_int_t n, magmaFloat_const_ptr dD, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull ) *info = -1; else if ( m < 0 ) *info = -2; else if ( n < 0 ) *info = -3; else if ( ldda < max(1,m) ) *info = -5; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 grid( (m + NB - 1)/NB ); dim3 threads( NB ); if (type == MagmaLower) { hipLaunchKernelGGL(( clascl2_lower) , dim3(grid), dim3(threads), 0, queue , m, n, dD, dA, ldda); } else if (type == MagmaUpper) { hipLaunchKernelGGL(( clascl2_upper) , dim3(grid), dim3(threads), 0, queue , m, n, dD, dA, ldda); } else if (type == MagmaFull) { hipLaunchKernelGGL(( clascl2_full) , dim3(grid), dim3(threads), 0, queue , m, n, dD, dA, ldda); } } /** @see magmablas_clascl2_q @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_clascl2( magma_type_t type, magma_int_t m, magma_int_t n, magmaFloat_const_ptr dD, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t *info ) { magmablas_clascl2_q( type, m, n, dD, dA, ldda, magma_stream, info ); }
dce45070e20000199d4865bf089eafcec11eac06.cu
/* -- MAGMA (version 1.6.1) -- Univ. of Tennessee, Knoxville Univ. of California, Berkeley Univ. of Colorado, Denver @date January 2015 @generated from zlascl2.cu normal z -> c, Fri Jan 30 19:00:09 2015 @author Theo Mary */ #include "common_magma.h" #define NB 64 // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right. __global__ void clascl2_full(int m, int n, const float* D, magmaFloatComplex* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; float mul = D[ind]; A += ind; if (ind < m) { for(int j=0; j < n; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from left edge and moving right to diagonal. __global__ void clascl2_lower(int m, int n, const float* D, magmaFloatComplex* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; int break_d = (ind < n) ? ind : n-1; float mul = D[ind]; A += ind; if (ind < m) { for(int j=0; j <= break_d; j++ ) A[j*lda] *= mul; } } // each thread block does one NB x n block row of A. // each thread does one row, starting from right edge and moving left to diagonal. __global__ void clascl2_upper(int m, int n, const float *D, magmaFloatComplex* A, int lda) { int ind = blockIdx.x * NB + threadIdx.x; float mul = D[ind]; A += ind; if (ind < m) { for(int j=n-1; j >= ind; j--) A[j*lda] *= mul; } } /** Purpose ------- CLASCL2 scales the M by N complex matrix A by the real diagonal matrix dD. TYPE specifies that A may be full, upper triangular, lower triangular. Arguments --------- \param[in] type magma_type_t TYPE indices the storage type of the input matrix A. = MagmaFull: full matrix. = MagmaLower: lower triangular matrix. = MagmaUpper: upper triangular matrix. Other formats that LAPACK supports, MAGMA does not currently support. \param[in] m INTEGER The number of rows of the matrix A. M >= 0. \param[in] n INTEGER The number of columns of the matrix A. N >= 0. \param[in] dD REAL vector, dimension (M) The diagonal matrix containing the scalar factors. Stored as a vector. \param[in,out] dA COMPLEX array, dimension (LDDA,N) The matrix to be scaled by dD. See TYPE for the storage type. \param[in] ldda INTEGER The leading dimension of the array A. LDDA >= max(1,M). \param[out] info INTEGER - = 0: successful exit - < 0: if INFO = -i, the i-th argument had an illegal value. @param[in] queue magma_queue_t Queue to execute in. 
@ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_clascl2_q( magma_type_t type, magma_int_t m, magma_int_t n, magmaFloat_const_ptr dD, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_queue_t queue, magma_int_t *info ) { *info = 0; if ( type != MagmaLower && type != MagmaUpper && type != MagmaFull ) *info = -1; else if ( m < 0 ) *info = -2; else if ( n < 0 ) *info = -3; else if ( ldda < max(1,m) ) *info = -5; if (*info != 0) { magma_xerbla( __func__, -(*info) ); return; //info; } dim3 grid( (m + NB - 1)/NB ); dim3 threads( NB ); if (type == MagmaLower) { clascl2_lower <<< grid, threads, 0, queue >>> (m, n, dD, dA, ldda); } else if (type == MagmaUpper) { clascl2_upper <<< grid, threads, 0, queue >>> (m, n, dD, dA, ldda); } else if (type == MagmaFull) { clascl2_full <<< grid, threads, 0, queue >>> (m, n, dD, dA, ldda); } } /** @see magmablas_clascl2_q @ingroup magma_caux2 ********************************************************************/ extern "C" void magmablas_clascl2( magma_type_t type, magma_int_t m, magma_int_t n, magmaFloat_const_ptr dD, magmaFloatComplex_ptr dA, magma_int_t ldda, magma_int_t *info ) { magmablas_clascl2_q( type, m, n, dD, dA, ldda, magma_stream, info ); }
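A stripped-down sketch of the row-scaling pattern shared by the three clascl2 kernels: one thread per row of the column-major matrix A (leading dimension lda), each row multiplied by its diagonal factor D[ind]. Note that the kernels above load D[ind] before the ind < m bounds check, so threads in a partial last block read past the end of D; the sketch (row_scale is an illustrative name, not a MAGMA routine) moves the load inside the guard:

__global__ void row_scale(int m, int n, const float* D, float* A, int lda) {
  int ind = blockIdx.x * blockDim.x + threadIdx.x;
  if (ind < m) {
    float mul = D[ind];
    for (int j = 0; j < n; ++j)
      A[ind + j * lda] *= mul;   // walk across the row, one column per step
  }
}

// Launched like the full-matrix case in magmablas_clascl2_q, with NB = 64:
//   row_scale<<<(m + NB - 1) / NB, NB, 0, queue>>>(m, n, dD, dA, ldda);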
118b876f50b22d452fc199e0ddc0ad101f9921a6.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <stdio.h> __global__ void kernel( void ) { } int main( void ) { hipLaunchKernelGGL(( kernel), dim3(1),dim3(1), 0, 0, ); printf( "Hello, GPU World!\n" ); return 0; }
118b876f50b22d452fc199e0ddc0ad101f9921a6.cu
#include <stdio.h> __global__ void kernel( void ) { } int main( void ) { kernel<<<1,1>>>(); printf( "Hello, GPU World!\n" ); return 0; }
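This pair shows the launch rewrite in its simplest form: kernel<<<1,1>>>() on the CUDA side becomes hipLaunchKernelGGL((kernel), dim3(1), dim3(1), 0, 0, ) on the HIP side, the two extra arguments being dynamic shared memory size and stream. A sketch of the same mapping on a kernel that takes arguments; axpy, d_x and d_y are made-up names for the illustration:

__global__ void axpy(int n, float a, const float* x, float* y) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n) y[i] = a * x[i] + y[i];
}

// CUDA launch:
//   axpy<<<(n + 255) / 256, 256>>>(n, a, d_x, d_y);
// hipify's rewrite (roughly):
//   hipLaunchKernelGGL(axpy, dim3((n + 255) / 256), dim3(256), 0, 0, n, a, d_x, d_y);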
8895bab4893ff9d569d322c141afc140454132f3.hip
// !!! This is a file automatically generated by hipify!!! #include <stdbool.h> #include <stdlib.h> #include <stdio.h> #include <time.h> #include <math.h> #include <hip/hip_runtime.h> #include <hip/hip_runtime.h> #include <string> #define MAX_THREADS 512 #define MAX_VALUE 1 << 16 #define gpuErrCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(hipError_t code, const char*file, int line, bool abort = true) { if (code != hipSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", hipGetErrorString(code), file, line); if (abort) exit(code); } } void swap(int* a, int* b) { int tmp = *a; *a = *b; *b = *a; } __device__ void swapGpu(int* a, int* b) { int tmp = *a; *a = *b; *b = *a; } void bitonicStepCpu(int* vals, int n) { int logn = log2(n); int d = 1 << logn; --logn; for (int i = 0; i < d >> 1; ++i) { if (vals[i] > vals[d - i - 1]) swap(&vals[i], &vals[d - i - 1]); } for (int k = logn; k > 0; --k) { d = 1 << k; for (int j = 0; j < n; j += d) for (int i = 0; i < d >> 1; ++i) { if (vals[i + j] > vals[i + j + (d >> 1)]) swap(&vals[i + j], &vals[i + j + (d >> 1)]); } } } void bitonicSortCpu(int* vals, int n) { int* tmp = (int *)malloc(n * sizeof(int)); memcpy(tmp, vals, n * sizeof(int)); int logn = log2(n); for (int k = 1, d = 2; k <= logn; ++k, d <<= 1) for (int i = 0; i < n; i += d) bitonicStepCpu((int *)&tmp[i], d); memcpy(vals, tmp, n * sizeof(int)); free(tmp); return; } __global__ void bitonicStepGpu(int *deviceValues, int j, int k) { unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; unsigned int xor = i ^ j; if (k > i) { if ((i & k) == 0) { if (deviceValues[i] > deviceValues[xor]) swapGpu(&deviceValues[i], &deviceValues[xor]); } else { if (deviceValues[i] < deviceValues[xor]) swapGpu(&deviceValues[i], &deviceValues[xor]); } } } extern "C" void bitonicSortGpu(int *vals, int valsCnt) { int *deviceValues; size_t size = valsCnt * sizeof(int); gpuErrCheck(hipMalloc((void**)&deviceValues, size)); gpuErrCheck(hipMemcpy(deviceValues, vals, size, hipMemcpyHostToDevice)) int numThreads; int numBlocks; if (valsCnt <= MAX_THREADS) { numThreads = valsCnt; numBlocks = 1; } else { numThreads = MAX_THREADS; numBlocks = valsCnt / numThreads; } dim3 blocks(numBlocks, 1); dim3 threads(numThreads, 1); for (int k = 2; k <= valsCnt; k <<= 1) { for (int j = k >> 1; j > 0; j >>= 1) { hipLaunchKernelGGL(( bitonicStepGpu) , dim3(blocks), dim3(threads), 0, 0, deviceValues, j, k); gpuErrCheck(hipGetLastError()); } } gpuErrCheck(hipMemcpy(vals, deviceValues, size, hipMemcpyDeviceToHost)); gpuErrCheck(hipFree(deviceValues)); } double printTime(char *type, clock_t start, clock_t stop) { double time = ((double)(stop - start)) / CLOCKS_PER_SEC; printf("%s time: %.5fs\n", type, time); return time; } bool isSorted(int *vals, int n) { for (int i = 0; i < n - 1; ++i) if (vals[i] > vals[i + 1]) return false; return true; } void generateMatrix(int *vals, int n) { srand(time(NULL)); for (int i = 0; i < n; ++i) vals[i] = rand() % MAX_VALUE; } int main() { double cpuTime[25], gpuTime[25]; int sizes[25]; for (int i = 0, valsCnt = 4; valsCnt < 2 << 25; valsCnt = valsCnt << 1, ++i) { printf("Size = %d\n", valsCnt); sizes[i] = valsCnt; clock_t start, stop; int *gpuVals = (int *)malloc(valsCnt * sizeof(int)); int *cpuVals = (int *)malloc(valsCnt * sizeof(int)); generateMatrix(gpuVals, valsCnt); memcpy(cpuVals, gpuVals, valsCnt * sizeof(int)); start = clock(); bitonicSortGpu(gpuVals, valsCnt); stop = clock(); gpuTime[i] = printTime("GPU", start, stop); start = clock(); bitonicSortCpu(cpuVals, valsCnt); stop = 
clock(); cpuTime[i] = printTime("CPU", start, stop); free(cpuVals); free(gpuVals); } return 0; }
8895bab4893ff9d569d322c141afc140454132f3.cu
#include <stdbool.h> #include <stdlib.h> #include <stdio.h> #include <time.h> #include <math.h> #include <cuda.h> #include <cuda_runtime.h> #include <string> #define MAX_THREADS 512 #define MAX_VALUE 1 << 16 #define gpuErrCheck(ans) { gpuAssert((ans), __FILE__, __LINE__); } inline void gpuAssert(cudaError_t code, const char*file, int line, bool abort = true) { if (code != cudaSuccess) { fprintf(stderr, "GPUassert: %s %s %d\n", cudaGetErrorString(code), file, line); if (abort) exit(code); } } void swap(int* a, int* b) { int tmp = *a; *a = *b; *b = *a; } __device__ void swapGpu(int* a, int* b) { int tmp = *a; *a = *b; *b = *a; } void bitonicStepCpu(int* vals, int n) { int logn = log2(n); int d = 1 << logn; --logn; for (int i = 0; i < d >> 1; ++i) { if (vals[i] > vals[d - i - 1]) swap(&vals[i], &vals[d - i - 1]); } for (int k = logn; k > 0; --k) { d = 1 << k; for (int j = 0; j < n; j += d) for (int i = 0; i < d >> 1; ++i) { if (vals[i + j] > vals[i + j + (d >> 1)]) swap(&vals[i + j], &vals[i + j + (d >> 1)]); } } } void bitonicSortCpu(int* vals, int n) { int* tmp = (int *)malloc(n * sizeof(int)); memcpy(tmp, vals, n * sizeof(int)); int logn = log2(n); for (int k = 1, d = 2; k <= logn; ++k, d <<= 1) for (int i = 0; i < n; i += d) bitonicStepCpu((int *)&tmp[i], d); memcpy(vals, tmp, n * sizeof(int)); free(tmp); return; } __global__ void bitonicStepGpu(int *deviceValues, int j, int k) { unsigned int i = threadIdx.x + blockDim.x * blockIdx.x; unsigned int xor = i ^ j; if (k > i) { if ((i & k) == 0) { if (deviceValues[i] > deviceValues[xor]) swapGpu(&deviceValues[i], &deviceValues[xor]); } else { if (deviceValues[i] < deviceValues[xor]) swapGpu(&deviceValues[i], &deviceValues[xor]); } } } extern "C" void bitonicSortGpu(int *vals, int valsCnt) { int *deviceValues; size_t size = valsCnt * sizeof(int); gpuErrCheck(cudaMalloc((void**)&deviceValues, size)); gpuErrCheck(cudaMemcpy(deviceValues, vals, size, cudaMemcpyHostToDevice)) int numThreads; int numBlocks; if (valsCnt <= MAX_THREADS) { numThreads = valsCnt; numBlocks = 1; } else { numThreads = MAX_THREADS; numBlocks = valsCnt / numThreads; } dim3 blocks(numBlocks, 1); dim3 threads(numThreads, 1); for (int k = 2; k <= valsCnt; k <<= 1) { for (int j = k >> 1; j > 0; j >>= 1) { bitonicStepGpu <<<blocks, threads>>> (deviceValues, j, k); gpuErrCheck(cudaGetLastError()); } } gpuErrCheck(cudaMemcpy(vals, deviceValues, size, cudaMemcpyDeviceToHost)); gpuErrCheck(cudaFree(deviceValues)); } double printTime(char *type, clock_t start, clock_t stop) { double time = ((double)(stop - start)) / CLOCKS_PER_SEC; printf("%s time: %.5fs\n", type, time); return time; } bool isSorted(int *vals, int n) { for (int i = 0; i < n - 1; ++i) if (vals[i] > vals[i + 1]) return false; return true; } void generateMatrix(int *vals, int n) { srand(time(NULL)); for (int i = 0; i < n; ++i) vals[i] = rand() % MAX_VALUE; } int main() { double cpuTime[25], gpuTime[25]; int sizes[25]; for (int i = 0, valsCnt = 4; valsCnt < 2 << 25; valsCnt = valsCnt << 1, ++i) { printf("Size = %d\n", valsCnt); sizes[i] = valsCnt; clock_t start, stop; int *gpuVals = (int *)malloc(valsCnt * sizeof(int)); int *cpuVals = (int *)malloc(valsCnt * sizeof(int)); generateMatrix(gpuVals, valsCnt); memcpy(cpuVals, gpuVals, valsCnt * sizeof(int)); start = clock(); bitonicSortGpu(gpuVals, valsCnt); stop = clock(); gpuTime[i] = printTime("GPU", start, stop); start = clock(); bitonicSortCpu(cpuVals, valsCnt); stop = clock(); cpuTime[i] = printTime("CPU", start, stop); free(cpuVals); free(gpuVals); } return 0; }
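Both versions of this file carry the same two defects: the swap helpers store *b = *a after *a has already been overwritten, so no exchange happens, and xor is a reserved alternative operator token in standard C++, so it cannot be used as a variable name. A minimal corrected sketch of the helpers and the step kernel, using the usual bitonic-network guard on the partner index rather than on k (swap_vals, bitonicStep and partner are names chosen for the sketch):

__host__ __device__ inline void swap_vals(int* a, int* b) {
  int tmp = *a;
  *a = *b;
  *b = tmp;          // keep the saved value, not the already-overwritten *a
}

__global__ void bitonicStep(int* v, int j, int k) {
  unsigned int i = threadIdx.x + blockDim.x * blockIdx.x;
  unsigned int partner = i ^ j;
  if (partner > i) {                     // each compare-exchange handled by one thread
    bool ascending = ((i & k) == 0);
    if (ascending ? (v[i] > v[partner]) : (v[i] < v[partner]))
      swap_vals(&v[i], &v[partner]);
  }
}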
e6076a42b02ca5b9fb0640c7317b9436c155a178.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" // Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/label_smooth_kernel.h" #include <vector> #include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" namespace phi { template <typename T> struct LabelSmoothFunctor { T epsilon; T label_dim; __forceinline__ LabelSmoothFunctor(float epsilon_data, int label_dim_data) { epsilon = static_cast<T>(epsilon_data); label_dim = static_cast<T>(label_dim_data); } __device__ __forceinline__ T operator()(const T x) const { return (static_cast<T>(1 - epsilon) * x + static_cast<T>(epsilon / label_dim)); } }; template <typename T> __global__ void LabelSmoothRunDistKernel(const int N, const float epsilon, const int dist_numel, const T* src, const T* dist_data, T* dst) { CUDA_KERNEL_LOOP(idx, N) { int dist_idx = idx % dist_numel; dst[idx] = static_cast<T>(1 - epsilon) * src[idx] + static_cast<T>(epsilon) * dist_data[dist_idx]; } } template <typename T, typename Context> void LabelSmoothKernel(const Context& ctx, const DenseTensor& label, const paddle::optional<DenseTensor>& prior_dist, float epsilon, DenseTensor* out) { auto label_dim = label.dims()[label.dims().size() - 1]; auto size_prob = label.numel(); const T* in_data = label.data<T>(); T* out_data = ctx.template Alloc<T>(out); if (prior_dist.get_ptr()) { int threads = 512; int grid = (size_prob + threads - 1) / threads; auto stream = ctx.stream(); const auto* dist_t = prior_dist.get_ptr(); auto dist_numel = dist_t->numel(); const T* dist_data = dist_t->data<T>(); hipLaunchKernelGGL(( LabelSmoothRunDistKernel<T>), dim3(grid), dim3(threads), 0, stream, size_prob, epsilon, dist_numel, in_data, dist_data, out_data); } else { std::vector<const DenseTensor*> ins = {&label}; std::vector<DenseTensor*> outs = {out}; auto functor = LabelSmoothFunctor<T>(epsilon, label_dim); paddle::operators::LaunchSameDimsElementwiseCudaKernel<T>( ctx, ins, &outs, functor); } } } // namespace phi PD_REGISTER_KERNEL( label_smooth, GPU, ALL_LAYOUT, phi::LabelSmoothKernel, float, double) {}
e6076a42b02ca5b9fb0640c7317b9436c155a178.cu
// Copyright (c) 2022 PaddlePaddle Authors. All Rights Reserved. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. #include "paddle/phi/kernels/label_smooth_kernel.h" #include <vector> #include "paddle/fluid/operators/elementwise/elementwise_op_impl.cu.h" #include "paddle/phi/backends/gpu/gpu_context.h" #include "paddle/phi/core/kernel_registry.h" namespace phi { template <typename T> struct LabelSmoothFunctor { T epsilon; T label_dim; __forceinline__ LabelSmoothFunctor(float epsilon_data, int label_dim_data) { epsilon = static_cast<T>(epsilon_data); label_dim = static_cast<T>(label_dim_data); } __device__ __forceinline__ T operator()(const T x) const { return (static_cast<T>(1 - epsilon) * x + static_cast<T>(epsilon / label_dim)); } }; template <typename T> __global__ void LabelSmoothRunDistKernel(const int N, const float epsilon, const int dist_numel, const T* src, const T* dist_data, T* dst) { CUDA_KERNEL_LOOP(idx, N) { int dist_idx = idx % dist_numel; dst[idx] = static_cast<T>(1 - epsilon) * src[idx] + static_cast<T>(epsilon) * dist_data[dist_idx]; } } template <typename T, typename Context> void LabelSmoothKernel(const Context& ctx, const DenseTensor& label, const paddle::optional<DenseTensor>& prior_dist, float epsilon, DenseTensor* out) { auto label_dim = label.dims()[label.dims().size() - 1]; auto size_prob = label.numel(); const T* in_data = label.data<T>(); T* out_data = ctx.template Alloc<T>(out); if (prior_dist.get_ptr()) { int threads = 512; int grid = (size_prob + threads - 1) / threads; auto stream = ctx.stream(); const auto* dist_t = prior_dist.get_ptr(); auto dist_numel = dist_t->numel(); const T* dist_data = dist_t->data<T>(); LabelSmoothRunDistKernel<T><<<grid, threads, 0, stream>>>( size_prob, epsilon, dist_numel, in_data, dist_data, out_data); } else { std::vector<const DenseTensor*> ins = {&label}; std::vector<DenseTensor*> outs = {out}; auto functor = LabelSmoothFunctor<T>(epsilon, label_dim); paddle::operators::LaunchSameDimsElementwiseCudaKernel<T>( ctx, ins, &outs, functor); } } } // namespace phi PD_REGISTER_KERNEL( label_smooth, GPU, ALL_LAYOUT, phi::LabelSmoothKernel, float, double) {}
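The uniform branch above folds label smoothing into an elementwise functor, out = (1 - epsilon) * x + epsilon / K with K the size of the last label dimension, while the prior-distribution branch replaces the 1/K term with dist_data[idx % dist_numel]. A dependency-free sketch of the uniform case; label_smooth_uniform is an illustrative name, not a Paddle kernel:

__global__ void label_smooth_uniform(const float* in, float* out,
                                     int n, float eps, int num_classes) {
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i < n)
    out[i] = (1.f - eps) * in[i] + eps / num_classes;
}

// For a one-hot row with eps = 0.1 and num_classes = 5, the hot entry maps to
// 0.9 + 0.02 = 0.92 and every zero entry to 0.02, so the row still sums to 1.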
60463dc8a16918a8f90e774fca2f529367037ca0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #define DEVICE_CODE #include "systemDefines.h" #include "InteractionEngine_interface.h" #include "NonBondedInteraction.h" #include "BondInteraction.h" #include "AngleInteraction.h" // #include "CellList_interface.h" #include "Auxiliary.h" texture<CoordType, 1, hipReadModeElementType> global_texRef_interaction_coord; texture<TypeType , 1, hipReadModeElementType> global_texRef_interaction_type; __constant__ InteractionType nonBondedInteractionType [MaxNumberNonBondedInteraction]; __constant__ ScalorType nonBondedInteractionParameter [MaxNumberNonBondedInteractionParameter]; __constant__ IndexType nonBondedInteractionParameterPosition [MaxNumberNonBondedInteraction]; __constant__ InteractionType bondedInteractionType [MaxNumberBondedInteraction]; __constant__ IndexType bondedInteractionParameterPosition [MaxNumberBondedInteraction]; __constant__ ScalorType bondedInteractionParameter [MaxNumberBondedInteractionParamemter]; __constant__ IndexType const_nonBondedInteractionTableLength[1]; __constant__ IndexType const_numAtomType[1]; __constant__ IndexType const_nonBondedInteractionTable [MaxLengthNonBondedInteractionTable]; void InteractionEngine::init (const MDSystem & sys, const IndexType & NTread) { hasBond = false; hasAngle = false; myBlockDim.y = 1; myBlockDim.z = 1; myBlockDim.x = NTread; IndexType nob; if (sys.ddata.numAtom % myBlockDim.x == 0){ nob = sys.ddata.numAtom / myBlockDim.x; } else { nob = sys.ddata.numAtom / myBlockDim.x + 1; } atomGridDim = toGridDim (nob); // size_t sizetype = sizeof(TypeType)*sys.ddata.numMem; hipBindTexture(0, global_texRef_interaction_coord, sys.ddata.coord, sizeof(CoordType) * sys.ddata.numMem); hipBindTexture(0, global_texRef_interaction_type, sys.ddata.type, sizeof(TypeType) * sys.ddata.numMem); checkCUDAError ("InteractionEngine::init, bind texture"); // init sum vectors sum_nb_p.reinit (sys.ddata.numAtom, NThreadForSum); sum_nb_vxx.reinit (sys.ddata.numAtom, NThreadForSum); sum_nb_vyy.reinit (sys.ddata.numAtom, NThreadForSum); sum_nb_vzz.reinit (sys.ddata.numAtom, NThreadForSum); sum_b_p.reinit (nob, NThreadForSum); sum_b_vxx.reinit (nob, NThreadForSum); sum_b_vyy.reinit (nob, NThreadForSum); sum_b_vzz.reinit (nob, NThreadForSum); sum_angle_p.reinit (nob, NThreadForSum); sum_angle_vxx.reinit (nob, NThreadForSum); sum_angle_vyy.reinit (nob, NThreadForSum); sum_angle_vzz.reinit (nob, NThreadForSum); for (IndexType i = 0; i < 8; ++i){ hipStreamCreate(&sum_stream[i]); } checkCUDAError ("InteractionEngine::init init sum statistic"); // exclusion list maxNumExclusion = 0; sharedExclusionList = false; exclusion_sbuffSize = size_t(0); } static IndexType hroundUp4 (const IndexType x) { if (x & 3 == 0){ return x; } else { return ((x >> 2) + 1) << 2; } } void InteractionEngine:: registNonBondedInteraction (const SystemNonBondedInteraction & sysNbInter) { if (! 
sysNbInter.beBuilt()) { throw MDExcptUnbuiltNonBondedInteraction ("InteractionEngine"); } if (sysNbInter.numberOfInteraction() > MaxNumberBondedInteraction ){ throw MDExcptExceedConstantMemLimit ( "InteractionEngine::registNonBondedInteraction", "nonBonedInteractionType", MaxNumberNonBondedInteraction * sizeof(InteractionType)); } if (sysNbInter.numberOfParameter() > MaxNumberNonBondedInteractionParameter ){ throw MDExcptExceedConstantMemLimit ( "InteractionEngine::registNonBondedInteraction", "nonBondedInteractionParameter", MaxNumberNonBondedInteractionParameter * sizeof(ScalorType)); } hipMemcpyToSymbol (nonBondedInteractionType, sysNbInter.interactionType(), sizeof(InteractionType) * sysNbInter.numberOfInteraction()); hipMemcpyToSymbol (nonBondedInteractionParameterPosition, sysNbInter.interactionParameterPosition(), sizeof(ScalorType) * sysNbInter.numberOfInteraction()); hipMemcpyToSymbol (nonBondedInteractionParameter, sysNbInter.interactionParameter(), sizeof(IndexType) * sysNbInter.numberOfParameter()); checkCUDAError ("InteractionEngine::init, init NB force setting"); IndexType tableSize = sysNbInter.interactionTableSize(); IndexType tmpNumAtomType = sysNbInter.numberOfAtomTypes(); if (tableSize > MaxLengthNonBondedInteractionTable){ throw MDExcptExceedConstantMemLimit( "InteractionEngine::registNonBondedInteraction", "nonBondedInteractionTable", MaxLengthNonBondedInteractionTable * sizeof (ScalorType)); } hipMemcpyToSymbol (const_nonBondedInteractionTableLength, &tableSize, sizeof (IndexType)); checkCUDAError ("InteractionEngine::init, const_nonBondedInteractionTableLength"); hipMemcpyToSymbol (const_numAtomType, &tmpNumAtomType, sizeof (IndexType)); checkCUDAError ("InteractionEngine::init, const_numAtomType"); hipMemcpyToSymbol (const_nonBondedInteractionTable, sysNbInter.interactionTable(), sizeof (IndexType) * tableSize); checkCUDAError ("InteractionEngine::init, const_nonBondedInteractionTable"); // applyNonBondedInteraction_CellList_sbuffSize = // sizeof(IndexType) * hroundUp4(myBlockDim.x) + // sizeof(CoordType) * hroundUp4(myBlockDim.x) + // sizeof(TypeType) * hroundUp4(myBlockDim.x); // printf ("total %d\npart1 %d\npart2 %d\npart3 %d\nround %d\n", // applyNonBondedInteraction_CellList_sbuffSize, // sizeof(IndexType) * hroundUp4(myBlockDim.x), // sizeof(CoordType) * hroundUp4(myBlockDim.x), // sizeof(TypeType) * hroundUp4(myBlockDim.x), // hroundUp4(myBlockDim.x)); // checkCUDAError ("InteractionEngine::init, init nonBondedInteractionTable"); energyCorr = sysNbInter.energyCorrection (); pressureCorr = sysNbInter.pressureCorrection (); maxNumExclusion = sysNbInter.maxNumberOfExclusion(); if (maxNumExclusion != 0){ sharedExclusionList = true; exclusion_sbuffSize = myBlockDim.x * maxNumExclusion * sizeof(IndexType); if (exclusion_sbuffSize > SystemSharedBuffSize){ sharedExclusionList = false; } } } void InteractionEngine:: registBondedInteraction (const SystemBondedInteraction & sysBdInter) { if (sysBdInter.hasBond() ){ hasBond = true; } if (sysBdInter.hasAngle()){ hasAngle = true; } if (sysBdInter.numberOfInteraction() > MaxNumberBondedInteraction ){ throw MDExcptExceedConstantMemLimit ( "InteractionEngine::registBondedInteraction", "bondedInteractionType", MaxNumberBondedInteraction * sizeof(InteractionType)); } if (sysBdInter.numberOfParameter() > MaxNumberBondedInteractionParamemter ){ throw MDExcptExceedConstantMemLimit ( "InteractionEngine::registBondedInteraction", "bondedInteractionParameter", MaxNumberBondedInteractionParamemter * sizeof(ScalorType)); } if (hasBond 
|| hasAngle){ hipMemcpyToSymbol (bondedInteractionType, sysBdInter.interactionType(), sizeof(InteractionType) * sysBdInter.numberOfInteraction()); hipMemcpyToSymbol (bondedInteractionParameterPosition, sysBdInter.interactionParameterPosition(), sizeof(ScalorType) * sysBdInter.numberOfInteraction()); hipMemcpyToSymbol (bondedInteractionParameter, sysBdInter.interactionParameter(), sizeof(IndexType) * sysBdInter.numberOfParameter()); checkCUDAError ("InteractionEngine::init, init bond force setting"); // cal shared buff size calBondInteraction_sbuffSize = myBlockDim.x * sizeof(ScalorType); calAngleInteraction_sbuffSize = myBlockDim.x * sizeof(ScalorType); } } InteractionEngine::~InteractionEngine() { hipUnbindTexture(global_texRef_interaction_coord); hipUnbindTexture(global_texRef_interaction_type); for (IndexType i = 0; i < 8; ++i){ hipStreamDestroy(sum_stream[i]); } } void InteractionEngine::clearInteraction (MDSystem & sys) { hipLaunchKernelGGL(( clearForce) , dim3(atomGridDim), dim3(myBlockDim), 0, 0, sys.ddata.numAtom, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz); checkCUDAError ("InteractionEngine::clearInteraction"); } // nblock should be 1 and block size should be 1 __global__ void applyEnergyPressureCorrection (ScalorType * ddata, ScalorType energyCorr, ScalorType pressureCorr) { ddata[mdStatisticEnergyCorrection] = energyCorr; ddata[mdStatisticPressureCorrection] = pressureCorr; } void InteractionEngine:: applyNonBondedInteraction (MDSystem & sys, const NeighborList & nlist, const ExclusionList * excllist, MDTimer *timer ) { if (timer != NULL) timer->tic(mdTimeNonBondedInteraction); if (excllist == NULL){ hipLaunchKernelGGL(( calNonBondedInteraction_neighbor) , dim3(atomGridDim), dim3(myBlockDim), 0, 0, sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.ddata.type, sys.ddata.rcut, sys.box, nlist.dnlist); } else{ hipLaunchKernelGGL(( calNonBondedInteraction_neighbor) , dim3(atomGridDim), dim3(myBlockDim), exclusion_sbuffSize, 0, sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.ddata.type, sys.box, nlist.dnlist, excllist->dexcllist, sharedExclusionList ); } checkCUDAError ("InteractionEngine::applyInteraction nb"); err.check ("interaction engine nb"); if (timer != NULL) timer->toc(mdTimeNonBondedInteraction); } // void InteractionEngine:: // applyNonBondedInteraction (MDSystem & sys, // const CellList & clist, // const ScalorType & rcut, // NeighborList & nlist, // MDTimer *timer ) // { // if (timer != NULL) timer->tic(mdTimeBuildNeighborList); // size_t applyNonBondedInteraction_CellList_sbuffSize = // (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) * // hroundUp4(clist.getBlockDim().x); // // sizeof(IndexType) * hroundUp4(myBlockDim.x) + // // sizeof(CoordType) * hroundUp4(myBlockDim.x) + // // sizeof(TypeType) * hroundUp4(myBlockDim.x); // calNonBondedInteraction // <<<clist.getCellGrimDim(), clist.getBlockDim(), // applyNonBondedInteraction_CellList_sbuffSize>>> ( // sys.ddata.numAtom, // sys.ddata.coord, // sys.ddata.forcx, // sys.ddata.forcy, // sys.ddata.forcz, // sys.ddata.type, // sys.box, // clist.dclist, // rcut, // nlist.dnlist, // err.ptr_de); // checkCUDAError ("InteractionEngine::applyInteraction nb"); // err.check ("interaction engine nb"); // if (timer != NULL) timer->toc(mdTimeBuildNeighborList); // } void InteractionEngine:: applyNonBondedInteraction (MDSystem & sys, const NeighborList & nlist, MDStatistic & st, const ExclusionList * excllist, MDTimer *timer ) { if 
(timer != NULL) timer->tic(mdTimeNBInterStatistic); if (excllist == NULL){ hipLaunchKernelGGL(( calNonBondedInteraction_neighbor) , dim3(atomGridDim), dim3(myBlockDim), 0, 0, sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.ddata.type, sys.ddata.rcut, sys.box, nlist.dnlist , sum_nb_p.buff, sum_nb_vxx.buff, sum_nb_vyy.buff, sum_nb_vzz.buff ); } else { hipLaunchKernelGGL(( calNonBondedInteraction_neighbor) , dim3(atomGridDim), dim3(myBlockDim), exclusion_sbuffSize, 0, sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.ddata.type, sys.box, nlist.dnlist , excllist->dexcllist, sharedExclusionList, sum_nb_p.buff, sum_nb_vxx.buff, sum_nb_vyy.buff, sum_nb_vzz.buff ); } checkCUDAError ("InteractionEngine::applyInteraction nb (with statistic)"); err.check ("interaction engine nb"); hipDeviceSynchronize(); sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0); sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX); sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY); sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ); ScalorType volumei = sys.box.size.x * sys.box.size.y * sys.box.size.z; volumei = 1.f / volumei; // printf ("apply Ec %f, Pc %f\n", // energyCorr * volumei, // pressureCorr * volumei * volumei); hipLaunchKernelGGL(( applyEnergyPressureCorrection) , dim3(1), dim3(1), 0, 0, st.ddata, energyCorr * volumei, pressureCorr * volumei * volumei); hipDeviceSynchronize(); if (timer != NULL) timer->toc(mdTimeNBInterStatistic); } void InteractionEngine:: applyNonBondedInteraction (MDSystem & sys, const CellList & clist, const ScalorType & rcut, MDTimer *timer ) { if (!clist.isempty()){ if (timer != NULL) timer->tic(mdTimeNonBondedInteraction); size_t applyNonBondedInteraction_CellList_sbuffSize = (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) * hroundUp4(clist.getCellBlockDim().x); hipLaunchKernelGGL(( calNonBondedInteraction_cell) , dim3(clist.getCellGrimDim()), dim3(clist.getCellBlockDim()), applyNonBondedInteraction_CellList_sbuffSize, 0, sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.ddata.type, sys.box, clist.dclist, rcut, err.ptr_de); checkCUDAError ("InteractionEngine::applyInteraction nb"); err.check ("interaction engine nb"); if (timer != NULL) timer->toc(mdTimeNonBondedInteraction); } else { applyNonBondedInteraction (sys, rcut, timer); } } void InteractionEngine:: applyNonBondedInteraction (MDSystem & sys, const CellList & clist, const ScalorType & rcut, MDStatistic & st, MDTimer *timer ) { if (!clist.isempty()){ if (timer != NULL) timer->tic(mdTimeNBInterStatistic); size_t applyNonBondedInteraction_CellList_sbuffSize = (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) * hroundUp4(clist.getCellBlockDim().x); hipLaunchKernelGGL(( calNonBondedInteraction_cell) , dim3(clist.getCellGrimDim()), dim3(clist.getCellBlockDim()), applyNonBondedInteraction_CellList_sbuffSize, 0, sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.ddata.type, sys.box, clist.dclist, rcut, sum_nb_p.buff, sum_nb_vxx.buff, sum_nb_vyy.buff, sum_nb_vzz.buff, err.ptr_de ); checkCUDAError ("InteractionEngine::applyInteraction nb (with statistic)"); err.check ("interaction engine nb"); hipDeviceSynchronize(); sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0); sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX); sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY); sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ); 
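  // Note on the correction step just below: energyCorr and pressureCorr are
  // the long-range (cutoff tail) coefficients taken from
  // sysNbInter.energyCorrection() / pressureCorrection() in
  // registNonBondedInteraction().  They appear to be stored independently of
  // the box volume, so before being deposited into the statistic array they
  // are rescaled with V = box.size.x * box.size.y * box.size.z, roughly
  //
  //   E_tail = energyCorr   / V
  //   P_tail = pressureCorr / V^2
  //
  // after which the one-thread kernel applyEnergyPressureCorrection writes
  // the two numbers into st.ddata[mdStatisticEnergyCorrection] and
  // st.ddata[mdStatisticPressureCorrection].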
ScalorType volumei = sys.box.size.x * sys.box.size.y * sys.box.size.z; volumei = 1.f / volumei; // printf ("apply Ec %f, Pc %f\n", // energyCorr * volumei, // pressureCorr * volumei * volumei); hipLaunchKernelGGL(( applyEnergyPressureCorrection) , dim3(1), dim3(1), 0, 0, st.ddata, energyCorr * volumei, pressureCorr * volumei * volumei); hipDeviceSynchronize(); if (timer != NULL) timer->toc(mdTimeNBInterStatistic); } else { applyNonBondedInteraction (sys, rcut, st, timer); } } void InteractionEngine:: applyNonBondedInteraction (MDSystem & sys, const ScalorType & rcut, MDTimer *timer ) { if (timer != NULL) timer->tic(mdTimeNonBondedInteraction); size_t applyNonBondedInteraction_AllPair_sbuffSize = (sizeof(CoordType) + sizeof(TypeType)) * hroundUp4(myBlockDim.x); hipLaunchKernelGGL(( calNonBondedInteraction_all) , dim3(atomGridDim), dim3(myBlockDim), applyNonBondedInteraction_AllPair_sbuffSize, 0, sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.ddata.type, sys.box, rcut, err.ptr_de); checkCUDAError ("InteractionEngine::applyInteraction nb"); err.check ("interaction engine nb"); if (timer != NULL) timer->toc(mdTimeNonBondedInteraction); } void InteractionEngine:: applyNonBondedInteraction (MDSystem & sys, const ScalorType & rcut, MDStatistic & st, MDTimer *timer ) { if (timer != NULL) timer->tic(mdTimeNBInterStatistic); size_t applyNonBondedInteraction_AllPair_sbuffSize = (sizeof(CoordType) + sizeof(TypeType)) * hroundUp4(myBlockDim.x); hipLaunchKernelGGL(( calNonBondedInteraction_all) , dim3(atomGridDim), dim3(myBlockDim), applyNonBondedInteraction_AllPair_sbuffSize, 0, sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.ddata.type, sys.box, rcut, sum_nb_p.buff, sum_nb_vxx.buff, sum_nb_vyy.buff, sum_nb_vzz.buff, err.ptr_de); checkCUDAError ("InteractionEngine::applyInteraction nb (with statistic)"); err.check ("interaction engine nb"); hipDeviceSynchronize(); sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0); sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX); sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY); sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ); ScalorType volumei = sys.box.size.x * sys.box.size.y * sys.box.size.z; volumei = 1.f / volumei; // printf ("apply Ec %f, Pc %f\n", // energyCorr * volumei, // pressureCorr * volumei * volumei); hipLaunchKernelGGL(( applyEnergyPressureCorrection) , dim3(1), dim3(1), 0, 0, st.ddata, energyCorr * volumei, pressureCorr * volumei * volumei); hipDeviceSynchronize(); if (timer != NULL) timer->toc(mdTimeNBInterStatistic); } // void InteractionEngine:: // applyNonBondedInteraction (MDSystem & sys, // const CellList & clist, // const ScalorType & rcut, // NeighborList & nlist, // MDStatistic & st, // MDTimer *timer ) // { // if (timer != NULL) timer->tic(mdTimeBuildNeighborList); // if (!clist.isempty()){ // size_t applyNonBondedInteraction_CellList_sbuffSize = // (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) * // hroundUp4(clist.getBlockDim().x); // calNonBondedInteraction // <<<clist.getCellGrimDim(), clist.getBlockDim(), // applyNonBondedInteraction_CellList_sbuffSize>>> ( // sys.ddata.numAtom, // sys.ddata.coord, // sys.ddata.forcx, // sys.ddata.forcy, // sys.ddata.forcz, // sys.ddata.type, // sys.box, // clist.dclist, // rcut, // nlist.dnlist, // sum_nb_p.buff, // sum_nb_vxx.buff, // sum_nb_vyy.buff, // sum_nb_vzz.buff, // err.ptr_de // ); // } // checkCUDAError ("InteractionEngine::applyInteraction nb (with statistic)"); // 
err.check ("interaction engine nb"); // hipDeviceSynchronize(); // sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0); // sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX, 1); // sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY, 2); // sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ, 3); // ScalorType volumei = sys.box.size.x * sys.box.size.y * sys.box.size.z; // volumei = 1.f / volumei; // // printf ("apply Ec %f, Pc %f\n", // // energyCorr * volumei, // // pressureCorr * volumei * volumei); // applyEnergyPressureCorrection // <<<1, 1, 0, 4>>> (st.ddata, // energyCorr * volumei, // pressureCorr * volumei * volumei); // hipDeviceSynchronize(); // if (timer != NULL) timer->toc(mdTimeBuildNeighborList); // } void InteractionEngine:: calTwinRangeCorrection (const MDSystem & sys, const CellList & clist, const ScalorType & rcut1, const ScalorType & rcut2, TwinRangeCorrectionRecorder & twrec, MDTimer * timer) { if (timer != NULL) timer->tic(mdTimeNBInterTwinRange); if (clist.isempty()){ size_t applyNonBondedInteraction_AllPair_sbuffSize = (sizeof(CoordType) + sizeof(TypeType)) * hroundUp4(myBlockDim.x); hipLaunchKernelGGL(( calTwinRangeCorrection_all) , dim3(atomGridDim), dim3(myBlockDim), applyNonBondedInteraction_AllPair_sbuffSize, 0, sys.ddata.numAtom, sys.ddata.coord, twrec.forcx, twrec.forcy, twrec.forcz, sys.ddata.type, sys.box, rcut1, rcut2, sum_nb_p.buff, sum_nb_vxx.buff, sum_nb_vyy.buff, sum_nb_vzz.buff, err.ptr_de); } else { size_t applyNonBondedInteraction_CellList_sbuffSize = (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) * hroundUp4(clist.getCellBlockDim().x); hipLaunchKernelGGL(( calTwinRangeCorrection_cell) , dim3(clist.getCellGrimDim()), dim3(clist.getCellBlockDim()), applyNonBondedInteraction_CellList_sbuffSize, 0, sys.ddata.numAtom, sys.ddata.coord, twrec.forcx, twrec.forcy, twrec.forcz, sys.ddata.type, sys.box, clist.dclist, rcut1, rcut2, sum_nb_p.buff, sum_nb_vxx.buff, sum_nb_vyy.buff, sum_nb_vzz.buff, err.ptr_de); } checkCUDAError ("TwinRangeCorrectionRecorder::calTwinRangeCorrection"); err.check ("TwinRangeCorrectionRecorder::calTwinRangeCorrection"); hipDeviceSynchronize(); MDStatistic st (sys); sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0); sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX); sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY); sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ); hipDeviceSynchronize(); st.updateHost (); twrec.energyCorrection() = st.nonBondedEnergy(); twrec.pressureCorrection() = st.pressure(sys.box); if (timer != NULL) timer->toc(mdTimeNBInterTwinRange); } void InteractionEngine:: buildNeighborListCalTwinRangeCorrection (const MDSystem & sys, const CellList & clist, const ScalorType & rcut1, const ScalorType & rcut2, NeighborList & nlist, TwinRangeCorrectionRecorder & twrec, MDTimer * timer) { if (timer != NULL) timer->tic(mdTimeBuildNeighborList); if (clist.isempty()){ size_t applyNonBondedInteraction_AllPair_sbuffSize = (sizeof(CoordType) + sizeof(TypeType)) * hroundUp4(myBlockDim.x); hipLaunchKernelGGL(( buildNeighborListCalTwinRangeCorr_all) , dim3(atomGridDim), dim3(myBlockDim), applyNonBondedInteraction_AllPair_sbuffSize, 0, sys.ddata.numAtom, sys.ddata.coord, twrec.forcx, twrec.forcy, twrec.forcz, sys.ddata.type, sys.box, rcut1, rcut2, nlist.dnlist, sum_nb_p.buff, sum_nb_vxx.buff, sum_nb_vyy.buff, sum_nb_vzz.buff, err.ptr_de); } else { size_t applyNonBondedInteraction_CellList_sbuffSize = (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) * 
hroundUp4(clist.getCellBlockDim().x); hipLaunchKernelGGL(( buildNeighborListCalTwinRangeCorr_cell) , dim3(clist.getCellGrimDim()), dim3(clist.getCellBlockDim()), applyNonBondedInteraction_CellList_sbuffSize, 0, sys.ddata.numAtom, sys.ddata.coord, twrec.forcx, twrec.forcy, twrec.forcz, sys.ddata.type, sys.box, clist.dclist, rcut1, rcut2, nlist.dnlist, sum_nb_p.buff, sum_nb_vxx.buff, sum_nb_vyy.buff, sum_nb_vzz.buff, err.ptr_de); } checkCUDAError ("TwinRangeCorrectionRecorder::calTwinRangeCorrection"); err.check ("TwinRangeCorrectionRecorder::calTwinRangeCorrection"); hipDeviceSynchronize(); MDStatistic st (sys); sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0); sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX); sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY); sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ); hipDeviceSynchronize(); st.updateHost (); twrec.energyCorrection() = st.nonBondedEnergy(); twrec.pressureCorrection() = st.pressure(sys.box); if (timer != NULL) timer->toc(mdTimeBuildNeighborList); } void InteractionEngine:: applyBondedInteraction (MDSystem & sys, const BondedInteractionList & bdlist, MDTimer *timer ) { if (hasBond) { if (timer != NULL) timer->tic(mdTimeBondedInteraction); hipLaunchKernelGGL(( calBondInteraction) , dim3(atomGridDim), dim3(myBlockDim), 0, 0, sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.box, bdlist.dbondlist); checkCUDAError ("InteractionEngine::applyInteraction bonded"); err.check ("interaction engine b"); if (timer != NULL) timer->toc(mdTimeBondedInteraction); } if (hasAngle){ if (timer != NULL) timer->tic(mdTimeAngleInteraction); hipLaunchKernelGGL(( calAngleInteraction) , dim3(atomGridDim), dim3(myBlockDim), 0, 0, sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.box, bdlist.danglelist); checkCUDAError ("InteractionEngine::applyInteraction angle"); err.check ("interaction engine angle"); if (timer != NULL) timer->toc(mdTimeAngleInteraction); } } void InteractionEngine:: applyBondedInteraction (MDSystem & sys, const BondedInteractionList & bdlist, MDStatistic & st, MDTimer *timer) { if (hasBond) { if (timer != NULL) timer->tic(mdTimeBInterStatistic); hipLaunchKernelGGL(( calBondInteraction) , dim3(atomGridDim), dim3(myBlockDim), calBondInteraction_sbuffSize, 0, sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.box, bdlist.dbondlist , sum_b_p.buff, sum_b_vxx.buff, sum_b_vyy.buff, sum_b_vzz.buff, err.ptr_de ); checkCUDAError ("InteractionEngine::applyInteraction bonded (with statistic)"); err.check ("interaction engine"); if (timer != NULL) timer->toc(mdTimeBInterStatistic); } if (hasBond) { if (timer != NULL) timer->tic(mdTimeBInterStatistic); hipDeviceSynchronize(); sum_b_p.sumBuffAdd(st.ddata, mdStatisticBondedPotential); sum_b_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX); sum_b_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY); sum_b_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ); hipDeviceSynchronize(); if (timer != NULL) timer->toc(mdTimeBInterStatistic); checkCUDAError ("InteractionEngine::applyInteraction sum bond statistic (with statistic)"); } if (hasAngle){ if (timer != NULL) timer->tic(mdTimeAngleInterStatistic); hipLaunchKernelGGL(( calAngleInteraction) , dim3(atomGridDim), dim3(myBlockDim), calAngleInteraction_sbuffSize, 0, sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.box, bdlist.danglelist, sum_angle_p.buff, sum_angle_vxx.buff, 
sum_angle_vyy.buff, sum_angle_vzz.buff, err.ptr_de); checkCUDAError ("InteractionEngine::applyInteraction angle"); err.check ("interaction engine angle"); if (timer != NULL) timer->toc(mdTimeAngleInterStatistic); } if (hasAngle){ if (timer != NULL) timer->tic(mdTimeAngleInterStatistic); sum_angle_p.sumBuffAdd(st.ddata, mdStatisticBondedPotential); sum_angle_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX); sum_angle_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY); sum_angle_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ); hipDeviceSynchronize(); if (timer != NULL) timer->toc(mdTimeAngleInterStatistic); checkCUDAError ("InteractionEngine::applyInteraction sum angle statistic (with statistic)"); } } // void InteractionEngine:: // calculateWidomDeltaEnergy (const MDSystem & sys, // const NeighborList & nlist, // WidomTestParticleInsertion_NVT & wtest, // MDTimer * timer ) // { // if (timer != NULL) timer->tic(mdTimeNBInterStatistic); // // printf ("### %d\n", nlist.mode); // if (nlist.mode == CellListBuilt){ // // printf ("### here %f\n", wtest.energyCorrection()); // widomDeltaPoten_NVT // <<<toGridDim(wtest.numTestParticle()), // nlist.myBlockDim.x, // nlist.myBlockDim.x * sizeof(ScalorType)>>> ( // wtest.numTestParticle(), // wtest.coordTestParticle, // wtest.typeTestParticle, // sys.ddata.numAtom, // sys.ddata.coord, // sys.ddata.type, // sys.box, // nlist.dclist, // wtest.sumExpDeltaU.buff, // err.ptr_de); // } // else if (nlist.mode == AllPairBuilt){ // // printf ("### here %f\n", wtest.energyCorrection()); // widomDeltaPoten_allPair_NVT // <<<toGridDim(wtest.numTestParticle()), // DefaultNThreadPerBlock, // DefaultNThreadPerBlock * sizeof(ScalorType)>>> ( // wtest.numTestParticle(), // wtest.coordTestParticle, // wtest.typeTestParticle, // sys.ddata.numAtom, // sys.ddata.coord, // sys.ddata.type, // sys.box, // nlist.myrlist, // wtest.sumExpDeltaU.buff, // err.ptr_de); // } // if (timer != NULL) timer->toc(mdTimeNBInterStatistic); // } // void InteractionEngine:: // calculateWidomDeltaEnergy (const MDSystem & sys, // const NeighborList & nlist, // WidomTestParticleInsertion_NVT2 & wtest, // MDTimer * timer ) // { // if (timer != NULL) timer->tic(mdTimeNBInterStatistic); // // printf ("### %d\n", nlist.mode); // if (nlist.mode == CellListBuilt){ // // printf ("### here %f\n", wtest.energyCorrection()); // widomDeltaPoten_NVT // <<<toGridDim(wtest.numTestParticle()), // nlist.myBlockDim.x, // nlist.myBlockDim.x * sizeof(ScalorType)>>> ( // wtest.numTestParticle(), // wtest.coordTestParticle, // wtest.typeTestParticle, // sys.ddata.numAtom, // sys.ddata.coord, // sys.ddata.type, // sys.box, // nlist.dclist, // wtest.sumExpDeltaU.buff, // err.ptr_de); // } // if (timer != NULL) timer->toc(mdTimeNBInterStatistic); // } // void InteractionEngine:: // calculateWidomDeltaEnergy (const MDSystem & sys, // const NeighborList & nlist, // WidomTestParticleInsertion_NPT & wtest, // MDTimer * timer ) // { // if (timer != NULL) timer->tic(mdTimeNBInterStatistic); // // printf ("### %d\n", nlist.mode); // if (nlist.mode == CellListBuilt){ // // printf ("### here %f, n: %d\n", wtest.energyCorrection(), wtest.numTestParticle()); // widomDeltaPoten_NVT // <<<toGridDim(wtest.numTestParticle()), // nlist.myBlockDim.x, // nlist.myBlockDim.x * sizeof(ScalorType)>>> ( // wtest.numTestParticle(), // wtest.coordTestParticle, // wtest.typeTestParticle, // sys.ddata.numAtom, // sys.ddata.coord, // sys.ddata.type, // sys.box, // nlist.dclist, // wtest.sumExpDeltaU.buff, // err.ptr_de); // } // else if (nlist.mode == 
AllPairBuilt){ // // printf ("### here %f\n", wtest.energyCorrection()); // widomDeltaPoten_allPair_NVT // <<<toGridDim(wtest.numTestParticle()), // DefaultNThreadPerBlock, // DefaultNThreadPerBlock * sizeof(ScalorType)>>> ( // wtest.numTestParticle(), // wtest.coordTestParticle, // wtest.typeTestParticle, // sys.ddata.numAtom, // sys.ddata.coord, // sys.ddata.type, // sys.box, // nlist.myrlist, // wtest.sumExpDeltaU.buff, // err.ptr_de); // } // // for (unsigned i = 0; i < wtest.numTestParticle(); ++i){ // // printf ("%d %f (%f %f %f)\n", i, // // wtest.sumExpDeltaU.buff[i], // // wtest.coordTestParticle[i].x, // // wtest.coordTestParticle[i].y, // // wtest.coordTestParticle[i].z // // ); // // } // if (timer != NULL) timer->toc(mdTimeNBInterStatistic); // } __global__ void clearForce (const IndexType numAtom, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType ii = threadIdx.x + bid * blockDim.x; if (ii < numAtom) { forcx[ii] = 0.0f; forcy[ii] = 0.0f; forcz[ii] = 0.0f; } } // __global__ void // calNonBondedInteraction (const CoordType * coord, // const TypeType * type, // DeviceCellListData clist, // DeviceCellListProperty clistPro, // ScalorType * forcx, // ScalorType * forcy, // ScalorType * forcz, // bool sharednbForceTable) // { // IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; // IndexType tid = threadIdx.x; // ScalorType fsumx(0.f), fsumy(0.f), fsumz(0.f); // extern __shared__ volatile char pub_sbuff[]; // volatile IndexType * targetIndex = (volatile IndexType *) pub_sbuff; // CoordType * targetCoord = (CoordType *) &targetIndex[roundUp4(blockDim.x)]; // volatile TypeType * targetType = (volatile TypeType *) &targetCoord[roundUp4(blockDim.x)]; // __syncthreads(); // IndexType ii = get (clist, bid, tid); // CoordType ref; // TypeType refType; // if (ii != MaxIndexValue){ // ref = tex1Dfetch (global_texRef_interaction_coord, ii); // refType = tex1Dfetch(global_texRef_interaction_type, ii); // } // for (unsigned i = 0; i < numNeighborCell(clistPro, bid); ++i){ // __syncthreads(); // IndexType targetCellIndex = getTargetCellIndex (clistPro, bid, i); // CoordType shift = getShiftValue (clistPro, bid, i); // IndexType targetIndex[tid] = get (clist, targetCellIndex, tid); // if (targetIndex[tid] != MaxIndexValue){ // targetCoord[tid] = tex1Dfetch (global_texRef_interaction_coord, targetIndexes[tid]); // targetType[tid] = tex1Dfetch (global_texRef_interaction_type, targetIndexes[tid]); // } // __syncthreads (); // if (ii != MaxIndexValue){ // for (IndexType jj = 0; jj < blockDim.x; ++jj){ // if (targetIndex[jj] == MaxIndexValue) continue; // ScalorType diffx = targetCoord[jj].x + shift.x - ref.x; // ScalorType diffy = targetCoord[jj].y + shift.y - ref.y; // ScalorType diffz = targetCoord[jj].z + shift.z - ref.z; // if ((diffx*diffx+diffy*diffy+diffz*diffz) < rlist2 && // targetIndex[jj] != ii){ // ForceIndexType fidx; // if (sharednbForceTable){ // fidx = nonBondedInteractionTableItem ( // nonBondedInteractionTable, const_numAtomType, refType, targetType[jj]); // } // else { // fidx = nonBondedInteractionTableItem ( // nonBondedInteractionTable, const_numAtomType, refType, targetType[jj]); // } // ScalorType fx, fy, fz; // nbforce (nonBondedInteractionType[fidx], // &nonBondedInteractionParameter // [nonBondedInteractionParameterPosition[fidx]], // diffx, diffy, diffz, // &fx, &fy, &fz); // fsumx += fx; // fsumy += fy; // fsumz += fz; // } // } // } // } // if (ii != MaxIndexValue){ // forcx[ii] += fsumx; // 
forcy[ii] += fsumy; // forcz[ii] += fsumz; // } // } __global__ void calNonBondedInteraction_neighbor (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const ScalorType * rcut, const RectangularBox box, const DeviceNeighborList nlist) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; ScalorType fsumx = 0.0f; ScalorType fsumy = 0.0f; ScalorType fsumz = 0.0f; IndexType ii = tid + bid * blockDim.x; if (ii < numAtom) { CoordType ref (tex1Dfetch(global_texRef_interaction_coord, ii)); ScalorType refrcut2 = rcut[ii]; refrcut2 = refrcut2 * refrcut2; ScalorType fx(0.f), fy(0.f), fz(0.f); for (IndexType jj = 0, nlistPosi = ii; jj < nlist.Nneighbor[ii]; ++jj, nlistPosi += nlist.stride){ IndexType targetIdx ( nlist.data [nlistPosi] ); IndexType nbForceIndex ( nlist.forceIndex [nlistPosi] ); CoordType target ( tex1Dfetch(global_texRef_interaction_coord, targetIdx) ); ScalorType diffx ( target.x - ref.x ); ScalorType diffy ( target.y - ref.y ); ScalorType diffz ( target.z - ref.z ); shortestImage (box, &diffx, &diffy, &diffz); ScalorType rcut2 = target.w * target.w; if (rcut2 < refrcut2) rcut2 = refrcut2; if (diffx*diffx + diffy*diffy + diffz*diffz < rcut2){ // if (diffx*diffx + diffy*diffy + diffz*diffz < refrcut2){ nbForce (nonBondedInteractionType[nbForceIndex], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[nbForceIndex]], diffx, diffy, diffz, &fx, &fy, &fz); fsumx += fx; fsumy += fy; fsumz += fz; } } forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } } __global__ void calNonBondedInteraction_neighbor (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const DeviceNeighborList nlist, const DeviceExclusionList dexcllist, const bool sharedExclusionList) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; ScalorType fsumx = 0.0f; ScalorType fsumy = 0.0f; ScalorType fsumz = 0.0f; IndexType ii = tid + bid * blockDim.x; IndexType * ptr_excllist; IndexType myNumExclusion (0); extern __shared__ char excl_sbuff[]; if (dexcllist.maxNumExclusion != 0 && ii < numAtom){ myNumExclusion = dexcllist.numExclusion[ii]; if (sharedExclusionList){ ptr_excllist = (IndexType *) excl_sbuff; for (IndexType jj = 0; jj < myNumExclusion; ++jj){ ptr_excllist[jj*blockDim.x+tid] = dexcllist.exclusionNeighborIndex[jj*dexcllist.stride+ii]; } } } if (ii < numAtom) { CoordType ref = tex1Dfetch(global_texRef_interaction_coord, ii); ScalorType fx(0.f), fy(0.f), fz(0.f); for (IndexType jj = 0, nlistPosi = ii; jj < nlist.Nneighbor[ii]; ++jj, nlistPosi += nlist.stride){ IndexType targetIdx ( nlist.data [nlistPosi] ); IndexType nbForceIndex; CoordType target; ScalorType diffx, diffy, diffz; if (sharedExclusionList){ for (IndexType kk = 0; kk < myNumExclusion; ++kk){ if (ptr_excllist[kk*blockDim.x+tid] == targetIdx) { goto skipInter; } } } else { for (IndexType kk = 0; kk < myNumExclusion; ++kk){ if (dexcllist.exclusionNeighborIndex[kk*dexcllist.stride+ii] == targetIdx) { goto skipInter; } } } nbForceIndex = ( nlist.forceIndex [nlistPosi] ); target = ( tex1Dfetch(global_texRef_interaction_coord, targetIdx) ); diffx = ( target.x - ref.x ); diffy = ( target.y - ref.y ); diffz = ( target.z - ref.z ); shortestImage (box, &diffx, &diffy, &diffz); nbForce (nonBondedInteractionType[nbForceIndex], &nonBondedInteractionParameter 
[nonBondedInteractionParameterPosition[nbForceIndex]], diffx, diffy, diffz, &fx, &fy, &fz); fsumx += fx; fsumy += fy; fsumz += fz; skipInter: { } } forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } } __global__ void calNonBondedInteraction_neighbor (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const ScalorType * rcut, const RectangularBox box, const DeviceNeighborList nlist, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; ScalorType fsumx = 0.0f; ScalorType fsumy = 0.0f; ScalorType fsumz = 0.0f; IndexType ii = tid + bid * blockDim.x; ScalorType myPoten = 0.0f, myVxx = 0.0f, myVyy = 0.0f, myVzz = 0.0f; if (ii < numAtom) { CoordType ref; ref = tex1Dfetch(global_texRef_interaction_coord, ii); ScalorType refrcut2 = rcut[ii]; refrcut2 = refrcut2 * refrcut2; ScalorType fx(0.f), fy(0.f), fz(0.f); ScalorType dp; for (IndexType jj = 0, nlistPosi = ii; jj < nlist.Nneighbor[ii]; ++jj, nlistPosi += nlist.stride){ IndexType targetIdx ( nlist.data[nlistPosi] ); IndexType nbForceIndex ( nlist.forceIndex [nlistPosi] ); CoordType target ( tex1Dfetch(global_texRef_interaction_coord, targetIdx) ); ScalorType diffx ( target.x - ref.x ); ScalorType diffy ( target.y - ref.y ); ScalorType diffz ( target.z - ref.z ); shortestImage (box, &diffx, &diffy, &diffz); ScalorType rcut2 = target.w * target.w; if (rcut2 < refrcut2) rcut2 = refrcut2; if (diffx*diffx + diffy*diffy + diffz*diffz < rcut2){ // if (diffx*diffx + diffy*diffy + diffz*diffz < refrcut2){ nbForcePoten (nonBondedInteractionType[nbForceIndex], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[nbForceIndex]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); // printf ("## %d\t%d\t%f\t%f\t%f\n", // ii, targetIdx, // ref.z, target.z, fz); // printf ("%f, %f %f %f, %f %f %f, %f %f %f, %f\n", // sqrtf(diffx*diffx+diffy*diffy+diffz*diffz), // ref.x, ref.y, ref.z, // target.x, target.y, target.z, // diffx, diffy, diffz, // dp // ); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; } } forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } if (ii < numAtom){ statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; } } __global__ void calNonBondedInteraction_neighbor (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const DeviceNeighborList nlist, const DeviceExclusionList dexcllist, const bool sharedExclusionList, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; ScalorType fsumx = 0.0f; ScalorType fsumy = 0.0f; ScalorType fsumz = 0.0f; IndexType ii = tid + bid * blockDim.x; ScalorType myPoten = 0.0f, myVxx = 0.0f, myVyy = 0.0f, myVzz = 0.0f; IndexType * ptr_excllist; IndexType myNumExclusion (0); extern __shared__ char excl_sbuff[]; if (dexcllist.maxNumExclusion != 0 && ii < numAtom){ myNumExclusion = dexcllist.numExclusion[ii]; if (sharedExclusionList){ ptr_excllist = (IndexType *) excl_sbuff; for (IndexType jj = 0; jj < myNumExclusion; 
++jj){ ptr_excllist[jj*blockDim.x+tid] = dexcllist.exclusionNeighborIndex[jj*dexcllist.stride+ii]; } } } if (ii < numAtom) { CoordType ref; ref = tex1Dfetch(global_texRef_interaction_coord, ii); ScalorType fx(0.f), fy(0.f), fz(0.f); ScalorType dp; for (IndexType jj = 0, nlistPosi = ii; jj < nlist.Nneighbor[ii]; ++jj, nlistPosi += nlist.stride){ IndexType targetIdx ( nlist.data[nlistPosi] ); IndexType nbForceIndex; CoordType target; ScalorType diffx, diffy, diffz; if (sharedExclusionList){ for (IndexType kk = 0; kk < myNumExclusion; ++kk){ if (ptr_excllist[kk*blockDim.x+tid] == targetIdx) { goto skipInter; } } } else { for (IndexType kk = 0; kk < myNumExclusion; ++kk){ if (dexcllist.exclusionNeighborIndex[kk*dexcllist.stride+ii] == targetIdx) { goto skipInter; } } } nbForceIndex = ( nlist.forceIndex [nlistPosi] ); target = ( tex1Dfetch(global_texRef_interaction_coord, targetIdx) ); diffx = ( target.x - ref.x ); diffy = ( target.y - ref.y ); diffz = ( target.z - ref.z ); shortestImage (box, &diffx, &diffy, &diffz); nbForcePoten (nonBondedInteractionType[nbForceIndex], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[nbForceIndex]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); // printf ("## %d\t%d\t%f\t%f\t%f\n", // ii, targetIdx, // ref.z, target.z, fz); // printf ("%f, %f %f %f, %f %f %f, %f %f %f, %f\n", // sqrtf(diffx*diffx+diffy*diffy+diffz*diffz), // ref.x, ref.y, ref.z, // target.x, target.y, target.z, // diffx, diffy, diffz, // dp // ); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; skipInter: { } } forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } if (ii < numAtom){ statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; } } __global__ void calBondInteraction (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const RectangularBox box, const DeviceBondList bdlist) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; ScalorType fsumx = 0.0f; ScalorType fsumy = 0.0f; ScalorType fsumz = 0.0f; IndexType ii = tid + bid * blockDim.x; if (ii >= numAtom) return; CoordType ref; #ifdef COMPILE_NO_TEX ref = coord[ii]; #else ref = tex1Dfetch(global_texRef_interaction_coord, ii); #endif IndexType myNumBond = bdlist.numBond[ii]; for (IndexType jj = 0; jj < bdlist.maxNumBond; ++jj){ if (jj == myNumBond) break; IndexType targetIdx = bdlist.bondNeighborIndex[jj * bdlist.stride + ii]; CoordType target; #ifdef COMPILE_NO_TEX target = coord[targetIdx]; #else target = tex1Dfetch(global_texRef_interaction_coord, targetIdx); #endif ScalorType diffx, diffy, diffz; diffx = target.x - ref.x; diffy = target.y - ref.y; diffz = target.z - ref.z; shortestImage (box, &diffx, &diffy, &diffz); ScalorType fx, fy, fz; IndexType bondFindex = bdlist.bondIndex[jj * bdlist.stride + ii]; bondForce (bondedInteractionType[bondFindex], &bondedInteractionParameter [bondedInteractionParameterPosition[bondFindex]], diffx, diffy, diffz, &fx, &fy, &fz); fsumx += fx; fsumy += fy; fsumz += fz; } forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } __global__ void calBondInteraction (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const RectangularBox box, const DeviceBondList bdlist, ScalorType * statistic_b_buff0, ScalorType * statistic_b_buff1, ScalorType * 
statistic_b_buff2, ScalorType * statistic_b_buff3, mdError_t * ptr_de) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; extern __shared__ volatile ScalorType buff[]; buff[tid] = 0.f; __syncthreads(); ScalorType fsumx = 0.0f; ScalorType fsumy = 0.0f; ScalorType fsumz = 0.0f; IndexType ii = tid + bid * blockDim.x; ScalorType myPoten = 0.0f, myVxx = 0.0f, myVyy = 0.0f, myVzz = 0.0f; if (ii < numAtom) { CoordType ref; #ifdef COMPILE_NO_TEX ref = coord[ii]; #else ref = tex1Dfetch(global_texRef_interaction_coord, ii); #endif IndexType myNumBond = bdlist.numBond[ii]; for (IndexType jj = 0; jj < bdlist.maxNumBond; ++jj){ if (jj == myNumBond) break; IndexType targetIdx = bdlist.bondNeighborIndex[jj * bdlist.stride + ii]; CoordType target; #ifdef COMPILE_NO_TEX target = coord[targetIdx]; #else target = tex1Dfetch(global_texRef_interaction_coord, targetIdx); #endif ScalorType diffx, diffy, diffz; diffx = target.x - ref.x; diffy = target.y - ref.y; diffz = target.z - ref.z; shortestImage (box, &diffx, &diffy, &diffz); ScalorType fx, fy, fz; IndexType bondFindex = bdlist.bondIndex[jj * bdlist.stride + ii]; ScalorType dp; bondForcePoten (bondedInteractionType[bondFindex], &bondedInteractionParameter [bondedInteractionParameterPosition[bondFindex]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; } forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } buff[tid] = myPoten * 0.5f; sumVectorBlockBuffer_2 (buff); if (threadIdx.x == 0) statistic_b_buff0[bid] = buff[0]; __syncthreads(); buff[tid] = myVxx * 0.5f; sumVectorBlockBuffer_2 (buff); if (threadIdx.x == 0) statistic_b_buff1[bid] = buff[0]; __syncthreads(); buff[tid] = myVyy * 0.5f; sumVectorBlockBuffer_2 (buff); if (threadIdx.x == 0) statistic_b_buff2[bid] = buff[0]; __syncthreads(); buff[tid] = myVzz * 0.5f; sumVectorBlockBuffer_2 (buff); if (threadIdx.x == 0) statistic_b_buff3[bid] = buff[0]; __syncthreads(); } __global__ void calAngleInteraction (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const RectangularBox box, const DeviceAngleList anglelist) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; ScalorType fsumx = 0.0f; ScalorType fsumy = 0.0f; ScalorType fsumz = 0.0f; IndexType ii = tid + bid * blockDim.x; IndexType myNumAngle; if (ii < numAtom){ myNumAngle = anglelist.numAngle[ii]; } else { myNumAngle = 0; return ; } // if (__all(myNumAngle == 0)) return ; if (ii < numAtom) { CoordType ref; #ifdef COMPILE_NO_TEX ref = coord[ii]; #else ref = tex1Dfetch(global_texRef_interaction_coord, ii); #endif for (IndexType jj = 0; jj < myNumAngle; ++jj){ IndexType targetIdx0 = anglelist.angleNeighborIndex[((jj<<1) ) * anglelist.stride + ii]; IndexType targetIdx1 = anglelist.angleNeighborIndex[((jj<<1)+1) * anglelist.stride + ii]; IndexType myPosi = anglelist.anglePosi[jj * anglelist.stride + ii]; CoordType target0, target1; #ifdef COMPILE_NO_TEX target0 = coord[targetIdx0]; target1 = coord[targetIdx1]; #else target0 = tex1Dfetch(global_texRef_interaction_coord, targetIdx0); target1 = tex1Dfetch(global_texRef_interaction_coord, targetIdx1); #endif ScalorType diff0x, diff0y, diff0z; ScalorType diff1x, diff1y, diff1z; bool center (myPosi == 1); if (center){ diff0x = ref.x - target0.x; diff0y = ref.y - target0.y; diff0z = ref.z - target0.z; diff1x = target1.x - ref.x; diff1y = target1.y - ref.y; diff1z = target1.z 
- ref.z; } else { diff0x = target0.x - ref.x; diff0y = target0.y - ref.y; diff0z = target0.z - ref.z; diff1x = target1.x - target0.x; diff1y = target1.y - target0.y; diff1z = target1.z - target0.z; } shortestImage (box, &diff0x, &diff0y, &diff0z); shortestImage (box, &diff1x, &diff1y, &diff1z); ScalorType f0x, f0y, f0z; ScalorType f1x, f1y, f1z; IndexType angleFindex = anglelist.angleIndex[jj * anglelist.stride + ii]; angleForce (center, bondedInteractionType[angleFindex], &bondedInteractionParameter [bondedInteractionParameterPosition[angleFindex]], diff0x, diff0y, diff0z, diff1x, diff1y, diff1z, &f0x, &f0y, &f0z, &f1x, &f1y, &f1z); if (center){ fsumx += f0x + f1x; fsumy += f0y + f1y; fsumz += f0z + f1z; } else { fsumx -= f0x; fsumy -= f0y; fsumz -= f0z; } } forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } } __global__ void calAngleInteraction (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const RectangularBox box, const DeviceAngleList anglelist, ScalorType * statistic_b_buff0, ScalorType * statistic_b_buff1, ScalorType * statistic_b_buff2, ScalorType * statistic_b_buff3, mdError_t * ptr_de) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; ScalorType fsumx = 0.0f; ScalorType fsumy = 0.0f; ScalorType fsumz = 0.0f; ScalorType myPoten = 0.0f, myVxx = 0.0f, myVyy = 0.0f, myVzz = 0.0f; IndexType ii = tid + bid * blockDim.x; IndexType myNumAngle; extern __shared__ volatile ScalorType buff[]; buff[tid] = 0.f; __syncthreads(); if (ii < numAtom) { CoordType ref; #ifdef COMPILE_NO_TEX ref = coord[ii]; #else ref = tex1Dfetch(global_texRef_interaction_coord, ii); #endif myNumAngle = anglelist.numAngle[ii]; for (IndexType jj = 0; jj < myNumAngle; ++jj){ IndexType targetIdx0 = anglelist.angleNeighborIndex[((jj<<1) ) * anglelist.stride + ii]; IndexType targetIdx1 = anglelist.angleNeighborIndex[((jj<<1)+1) * anglelist.stride + ii]; IndexType myPosi = anglelist.anglePosi[jj * anglelist.stride + ii]; CoordType target0, target1; #ifdef COMPILE_NO_TEX target0 = coord[targetIdx0]; target1 = coord[targetIdx1]; #else target0 = tex1Dfetch(global_texRef_interaction_coord, targetIdx0); target1 = tex1Dfetch(global_texRef_interaction_coord, targetIdx1); #endif ScalorType diff0x, diff0y, diff0z; ScalorType diff1x, diff1y, diff1z; bool center = (myPosi == 1); if (center){ diff0x = ref.x - target0.x; diff0y = ref.y - target0.y; diff0z = ref.z - target0.z; diff1x = target1.x - ref.x; diff1y = target1.y - ref.y; diff1z = target1.z - ref.z; } else { diff0x = target0.x - ref.x; diff0y = target0.y - ref.y; diff0z = target0.z - ref.z; diff1x = target1.x - target0.x; diff1y = target1.y - target0.y; diff1z = target1.z - target0.z; } shortestImage (box, &diff0x, &diff0y, &diff0z); shortestImage (box, &diff1x, &diff1y, &diff1z); ScalorType f0x, f0y, f0z; ScalorType f1x, f1y, f1z; IndexType angleFindex = anglelist.angleIndex[jj * anglelist.stride + ii]; ScalorType dp; angleForcePoten (center, bondedInteractionType[angleFindex], &bondedInteractionParameter [bondedInteractionParameterPosition[angleFindex]], diff0x, diff0y, diff0z, diff1x, diff1y, diff1z, &f0x, &f0y, &f0z, &f1x, &f1y, &f1z, &dp); myPoten += dp; if (center){ fsumx += f0x + f1x; fsumy += f0y + f1y; fsumz += f0z + f1z; myVxx -= f0x * diff0x - f1x * diff1x; myVyy -= f0y * diff0y - f1y * diff1y; myVzz -= f0z * diff0z - f1z * diff1z; } else { fsumx -= f0x; fsumy -= f0y; fsumz -= f0z; } } forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } buff[tid] = 
myPoten * 0.33333333333333333f; sumVectorBlockBuffer_2 (buff); if (threadIdx.x == 0) statistic_b_buff0[bid] = buff[0]; __syncthreads(); buff[tid] = myVxx; sumVectorBlockBuffer_2 (buff); if (threadIdx.x == 0) statistic_b_buff1[bid] = buff[0]; __syncthreads(); buff[tid] = myVyy; sumVectorBlockBuffer_2 (buff); if (threadIdx.x == 0) statistic_b_buff2[bid] = buff[0]; __syncthreads(); buff[tid] = myVzz; sumVectorBlockBuffer_2 (buff); if (threadIdx.x == 0) statistic_b_buff3[bid] = buff[0]; __syncthreads(); } // static __device__ IndexType shiftedD3toD1 ( // DeviceCellList clist, // RectangularBoxGeometry::RectangularBox box, // int ix, int iy, int iz, // ScalorType * shiftx , ScalorType * shifty, ScalorType * shiftz) // { // int tmp; // ix += (tmp = -int(floorf(ix * clist.NCelli.x))) * clist.NCell.x; // *shiftx = tmp * box.size.x; // iy += (tmp = -int(floorf(iy * clist.NCelli.y))) * clist.NCell.y; // *shifty = tmp * box.size.y; // iz += (tmp = -int(floorf(iz * clist.NCelli.z))) * clist.NCell.z; // *shiftz = tmp * box.size.z; // return D3toD1 (clist.NCell, ix, iy, iz); // } // __global__ void calNonBondedInteraction ( // const IndexType numAtom, // const CoordType * coord, // ScalorType * forcx, // ScalorType * forcy, // ScalorType * forcz, // const TypeType * type, // const RectangularBox box, // DeviceCellList clist, // mdError_t * ptr_de) // { // // RectangularBoxGeometry::normalizeSystem (box, &ddata); // IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; // IndexType tid = threadIdx.x; // IndexType bidx, bidy, bidz; // D1toD3 (clist.NCell, bid, bidx, bidy, bidz); // // load index // IndexType ii = getDeviceCellListData (clist, bid, tid); // // load iith coordinate // use texturefetch instead // CoordType ref; // TypeType reftype; // ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); // if (ii != MaxIndexValue){ // #ifdef COMPILE_NO_TEX // ref = coord[ii]; // reftype = type[ii]; // #else // ref = tex1Dfetch (global_texRef_interaction_coord, ii); // // reftype = tex1Dfetch(global_texRef_interaction_type, ii); // #endif // } // ScalorType rlist = clist.rlist; // // the target index and coordinates are shared // extern __shared__ volatile char pub_sbuff[]; // volatile IndexType * targetIndexes = // (volatile IndexType *) pub_sbuff; // CoordType * target = // (CoordType *) &targetIndexes[roundUp4(blockDim.x)]; // volatile TypeType * targettype = // (volatile TypeType *) &target[roundUp4(blockDim.x)]; // __syncthreads(); // // bool oneCellX(false), oneCellY(false), oneCellZ(false); // // if (clist.NCell.x == 1) oneCellX = true; // // if (clist.NCell.y == 1) oneCellY = true; // // if (clist.NCell.z == 1) oneCellZ = true; // // int upperx(1), lowerx(-1); // // int uppery(1), lowery(-1); // // int upperz(1), lowerz(-1); // // if (oneCellX) {lowerx = 0; upperx = 0;} // // if (oneCellY) {lowery = 0; uppery = 0;} // // if (oneCellZ) {lowerz = 0; upperz = 0;} // ScalorType rlist2 = rlist * rlist; // // loop over 27 neighbor cells // #pragma unroll 3 // // for (int nci = bidx + lowerx; nci <= bidx + upperx; ++nci){ // // for (int ncj = bidy + lowery; ncj <= bidy + uppery; ++ncj){ // // for (int nck = bidz + lowerz; nck <= bidz + upperz; ++nck){ // for (int nci = int(bidx) - 1; nci <= int(bidx) + 1; ++nci){ // for (int ncj = int(bidy) - 1; ncj <= int(bidy) + 1; ++ncj){ // for (int nck = int(bidz) - 1; nck <= int(bidz) + 1; ++nck){ // // for (int di = lowerx; di <= upperx; ++di){ // // for (int dj = lowery; dj <= uppery; ++dj){ // // for (int dk = lowerz; dk <= upperz; ++dk){ // __syncthreads(); // // the 
shift value of a cell is pre-computed // ScalorType xshift, yshift, zshift; // // int nci = di + bidx; // // int ncj = dj + bidy; // // int nck = dk + bidz; // IndexType targetCellIdx = shiftedD3toD1 (clist, box, // nci, ncj, nck, // &xshift, &yshift, &zshift); // // load target index and coordinates form global memary // // IndexType tmp = (targetIndexes[tid] = // // getDeviceCellListData(clist, targetCellIdx, tid)); // targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid); // if (targetIndexes[tid] != MaxIndexValue){ // // #ifdef COMPILE_NO_TEX // // target[tid] = coord[tmp]; // // // targettype[tid] = type[tmp]; // // #else // target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]); // // targettype[tid] = tex1Dfetch(global_texRef_interaction_type, tmp); // // #endif // } // __syncthreads(); // // find neighbor // if (ii != MaxIndexValue){ // for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){ // // if (targetIndexes[jj] == MaxIndexValue) break; // ScalorType diffx = target[jj].x - xshift - ref.x; // ScalorType diffy = target[jj].y - yshift - ref.y; // ScalorType diffz = target[jj].z - zshift - ref.z; // // if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx); // // if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy); // // if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz); // //printf ("%d\t%d\t%f\t%f\n", ii, // ScalorType dr2; // if ((dr2 = (diffx*diffx+diffy*diffy+diffz*diffz)) < rlist2 && // targetIndexes[jj] != ii){ // IndexType fidx(0); // // fidx = AtomNBForceTable::calForceIndex ( // // nonBondedInteractionTable, // // const_numAtomType[0], // // reftype, // // targettype[jj]); // // if (fidx != mdForceNULL) { // ScalorType fx, fy, fz; // nbForce (nonBondedInteractionType[fidx], // &nonBondedInteractionParameter // [nonBondedInteractionParameterPosition[fidx]], // diffx, diffy, diffz, // dr2, // &fx, &fy, &fz); // fsumx += fx; // fsumy += fy; // fsumz += fz; // // } // } // } // } // } // } // } // if (ii != MaxIndexValue){ // forcx[ii] += fsumx; // forcy[ii] += fsumy; // forcz[ii] += fsumz; // } // } // __global__ void calNonBondedInteraction ( // const IndexType numAtom, // const CoordType * coord, // ScalorType * forcx, // ScalorType * forcy, // ScalorType * forcz, // const TypeType * type, // const RectangularBox box, // DeviceCellList clist, // ScalorType * statistic_nb_buff0, // ScalorType * statistic_nb_buff1, // ScalorType * statistic_nb_buff2, // ScalorType * statistic_nb_buff3, // mdError_t * ptr_de) // { // // RectangularBoxGeometry::normalizeSystem (box, &ddata); // IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; // IndexType tid = threadIdx.x; // IndexType bidx, bidy, bidz; // D1toD3 (clist.NCell, bid, bidx, bidy, bidz); // // load index // IndexType ii = getDeviceCellListData (clist, bid, tid); // // load iith coordinate // use texturefetch instead // CoordType ref; // TypeType reftype; // ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); // ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f); // if (ii != MaxIndexValue){ // #ifdef COMPILE_NO_TEX // ref = coord[ii]; // reftype = type[ii]; // #else // ref = tex1Dfetch (global_texRef_interaction_coord, ii); // reftype = tex1Dfetch(global_texRef_interaction_type, ii); // #endif // } // ScalorType rlist = clist.rlist; // // the target index and coordinates are shared // extern __shared__ volatile char pub_sbuff[]; // volatile IndexType * targetIndexes = // (volatile IndexType *) pub_sbuff; // CoordType * target = // 
(CoordType *) &targetIndexes[roundUp4(blockDim.x)]; // volatile TypeType * targettype = // (volatile TypeType *) &target[roundUp4(blockDim.x)]; // __syncthreads(); // // bool oneCellX(false), oneCellY(false), oneCellZ(false); // // if (clist.NCell.x == 1) oneCellX = true; // // if (clist.NCell.y == 1) oneCellY = true; // // if (clist.NCell.z == 1) oneCellZ = true; // // int upperx(1), lowerx(-1); // // int uppery(1), lowery(-1); // // int upperz(1), lowerz(-1); // // if (oneCellX) {lowerx = 0; upperx = 0;} // // if (oneCellY) {lowery = 0; uppery = 0;} // // if (oneCellZ) {lowerz = 0; upperz = 0;} // ScalorType rlist2 = rlist * rlist; // // loop over 27 neighbor cells // #pragma unroll 3 // for (int nci = int(bidx) - 1; nci <= int(bidx) + 1; ++nci){ // for (int ncj = int(bidy) - 1; ncj <= int(bidy) + 1; ++ncj){ // for (int nck = int(bidz) - 1; nck <= int(bidz) + 1; ++nck){ // // for (int di = lowerx; di <= upperx; ++di){ // // for (int dj = lowery; dj <= uppery; ++dj){ // // for (int dk = lowerz; dk <= upperz; ++dk){ // __syncthreads(); // // the shift value of a cell is pre-computed // ScalorType xshift, yshift, zshift; // // int nci = di + bidx; // // int ncj = dj + bidy; // // int nck = dk + bidz; // IndexType targetCellIdx = shiftedD3toD1 (clist, box, // nci, ncj, nck, // &xshift, &yshift, &zshift); // // load target index and coordinates form global memary // IndexType tmp = (targetIndexes[tid] = // getDeviceCellListData(clist, targetCellIdx, tid)); // if (tmp != MaxIndexValue){ // #ifdef COMPILE_NO_TEX // target[tid] = coord[tmp]; // targettype[tid] = type[tmp]; // #else // target[tid] = tex1Dfetch(global_texRef_interaction_coord, tmp); // targettype[tid] = tex1Dfetch(global_texRef_interaction_type, tmp); // #endif // } // __syncthreads(); // // find neighbor // if (ii != MaxIndexValue){ // for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){ // ScalorType diffx = target[jj].x - xshift - ref.x; // ScalorType diffy = target[jj].y - yshift - ref.y; // ScalorType diffz = target[jj].z - zshift - ref.z; // // if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx); // // if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy); // // if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz); // //printf ("%d\t%d\t%f\t%f\n", ii, // if ((diffx*diffx+diffy*diffy+diffz*diffz) < rlist2 && // targetIndexes[jj] != ii){ // IndexType fidx(0); // // fidx = AtomNBForceTable::calForceIndex ( // // nonBondedInteractionTable, // // const_numAtomType[0], // // reftype, // // targettype[jj]); // // if (fidx != mdForceNULL) { // ScalorType fx, fy, fz, dp; // nbForcePoten (nonBondedInteractionType[fidx], // &nonBondedInteractionParameter // [nonBondedInteractionParameterPosition[fidx]], // diffx, diffy, diffz, // &fx, &fy, &fz, &dp); // myPoten += dp; // myVxx += fx * diffx; // myVyy += fy * diffy; // myVzz += fz * diffz; // fsumx += fx; // fsumy += fy; // fsumz += fz; // // } // } // } // } // } // } // } // if (ii != MaxIndexValue){ // forcx[ii] += fsumx; // forcy[ii] += fsumy; // forcz[ii] += fsumz; // statistic_nb_buff0[ii] = myPoten * 0.5f; // statistic_nb_buff1[ii] = myVxx * 0.5f; // statistic_nb_buff2[ii] = myVyy * 0.5f; // statistic_nb_buff3[ii] = myVzz * 0.5f; // } // } __global__ void calNonBondedInteraction_cell (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const DeviceCellList clist, const ScalorType rcut, mdError_t * ptr_de) { // 
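  // Shared-memory layout used by this cell-list kernel (a sketch inferred
  // from the pointer arithmetic below and the host-side launch in
  // applyNonBondedInteraction): the dynamic buffer pub_sbuff is carved into
  // three consecutive arrays, each padded to roundUp4(blockDim.x) entries,
  //
  //   IndexType targetIndexes [roundUp4(blockDim.x)]
  //   CoordType target        [roundUp4(blockDim.x)]
  //   TypeType  targettype    [roundUp4(blockDim.x)]
  //
  // so the launch must supply at least
  //   (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType))
  //       * hroundUp4(clist.getCellBlockDim().x)
  // bytes of dynamic shared memory, which is what the host computes as
  // applyNonBondedInteraction_CellList_sbuffSize.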
RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType bidx, bidy, bidz; D1toD3 (clist.NCell, bid, bidx, bidy, bidz); // load index IndexType ii = getDeviceCellListData (clist, bid, tid); // load iith coordinate // use texturefetch instead CoordType ref; TypeType reftype; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); if (ii != MaxIndexValue){ #ifdef COMPILE_NO_TEX ref = coord[ii]; reftype = type[ii]; #else ref = tex1Dfetch (global_texRef_interaction_coord, ii); reftype = tex1Dfetch(global_texRef_interaction_type, ii); #endif } // the target index and coordinates are shared extern __shared__ volatile char pub_sbuff[]; volatile IndexType * targetIndexes = (volatile IndexType *) pub_sbuff; CoordType * target = (CoordType *) &targetIndexes[roundUp4(blockDim.x)]; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; ScalorType rcut2 = rcut * rcut; for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){ __syncthreads(); IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i); CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i); CoordType shift; shift.x = shiftNoi.x * box.size.x; shift.y = shiftNoi.y * box.size.y; shift.z = shiftNoi.z * box.size.z; targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid); if (targetIndexes[tid] != MaxIndexValue){ target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]); targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]); } bool oneCellX(false), oneCellY(false), oneCellZ(false); if (clist.NCell.x == 1) oneCellX = true; if (clist.NCell.y == 1) oneCellY = true; if (clist.NCell.z == 1) oneCellZ = true; __syncthreads(); // find neighbor if (ii != MaxIndexValue){ for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){ // if (targetIndexes[jj] == MaxIndexValue) break; ScalorType diffx = target[jj].x - shift.x - ref.x; ScalorType diffy = target[jj].y - shift.y - ref.y; ScalorType diffz = target[jj].z - shift.z - ref.z; if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx); if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy); if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz); //printf ("%d\t%d\t%f\t%f\n", ii, // ScalorType dr2; if (((diffx*diffx+diffy*diffy+diffz*diffz)) < rcut2 && targetIndexes[jj] != ii){ IndexType fidx(0); fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[jj]); // if (fidx != mdForceNULL) { ScalorType fx, fy, fz; nbForce (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, // dr2, &fx, &fy, &fz); fsumx += fx; fsumy += fy; fsumz += fz; // } } } } } if (ii != MaxIndexValue){ forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } } __global__ void calNonBondedInteraction_cell (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const DeviceCellList clist, const ScalorType rcut, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType bidx, bidy, bidz; D1toD3 (clist.NCell, bid, bidx, bidy, bidz); // load index 
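  // Statistics bookkeeping in this kernel variant (descriptive note): every
  // thread accumulates its own myPoten / myVxx / myVyy / myVzz over the
  // pairs it visits and stores them, scaled by 0.5f, into the per-atom
  // buffers statistic_nb_buff0..3.  The 0.5f factor is consistent with each
  // i-j pair being visited from both atoms.  On the host side these buffers
  // are then reduced by the sum_nb_* helpers into the MDStatistic entries
  // mdStatisticNonBondedPotential and mdStatisticVirialXX/YY/ZZ.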
IndexType ii = getDeviceCellListData (clist, bid, tid); // load iith coordinate // use texturefetch instead CoordType ref; TypeType reftype; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f); if (ii != MaxIndexValue){ #ifdef COMPILE_NO_TEX ref = coord[ii]; reftype = type[ii]; #else ref = tex1Dfetch (global_texRef_interaction_coord, ii); reftype = tex1Dfetch(global_texRef_interaction_type, ii); #endif } // the target index and coordinates are shared extern __shared__ volatile char pub_sbuff[]; volatile IndexType * targetIndexes = (volatile IndexType *) pub_sbuff; CoordType * target = (CoordType *) &targetIndexes[roundUp4(blockDim.x)]; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; ScalorType rcut2 = rcut * rcut; for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){ __syncthreads(); IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i); CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i); CoordType shift; shift.x = shiftNoi.x * box.size.x; shift.y = shiftNoi.y * box.size.y; shift.z = shiftNoi.z * box.size.z; targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid); if (targetIndexes[tid] != MaxIndexValue){ target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]); targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]); } bool oneCellX(false), oneCellY(false), oneCellZ(false); if (clist.NCell.x == 1) oneCellX = true; if (clist.NCell.y == 1) oneCellY = true; if (clist.NCell.z == 1) oneCellZ = true; __syncthreads(); // find neighbor if (ii != MaxIndexValue){ for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){ // if (targetIndexes[jj] == MaxIndexValue) break; ScalorType diffx = target[jj].x - shift.x - ref.x; ScalorType diffy = target[jj].y - shift.y - ref.y; ScalorType diffz = target[jj].z - shift.z - ref.z; if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx); if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy); if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz); //printf ("%d\t%d\t%f\t%f\n", ii, // ScalorType dr2; if (((diffx*diffx+diffy*diffy+diffz*diffz)) < rcut2 && targetIndexes[jj] != ii){ IndexType fidx(0); fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[jj]); // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; // } } } } } if (ii != MaxIndexValue){ forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; } } __global__ void calNonBondedInteraction (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, DeviceCellList clist, const ScalorType rcut, DeviceNeighborList nlist, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType bidx, bidy, bidz; D1toD3 (clist.NCell, bid, bidx, bidy, bidz); IndexType Nneighbor = 0; // load index 
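  // This kernel (the calNonBondedInteraction overload taking a
  // DeviceNeighborList) does two jobs in a single pass over the cell list:
  // it accumulates forces for pairs closer than rcut, and it refills the
  // neighbor list with pairs closer than nlist.rlist (assuming
  // rlist >= rcut).  The per-atom neighbor count is written to
  // nlist.Nneighbor[ii]; if the count exceeds nlist.listLength,
  // mdErrorShortNeighborList is reported through ptr_de so the caller can
  // enlarge the list.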
IndexType ii = getDeviceCellListData (clist, bid, tid); // load iith coordinate // use texturefetch instead CoordType ref; TypeType reftype; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); if (ii != MaxIndexValue){ #ifdef COMPILE_NO_TEX ref = coord[ii]; reftype = type[ii]; #else ref = tex1Dfetch (global_texRef_interaction_coord, ii); reftype = tex1Dfetch(global_texRef_interaction_type, ii); #endif } // ScalorType rlist = clist.rlist; // the target index and coordinates are shared extern __shared__ volatile char pub_sbuff[]; volatile IndexType * targetIndexes = (volatile IndexType *) pub_sbuff; CoordType * target = (CoordType *) &targetIndexes[roundUp4(blockDim.x)]; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; ScalorType rlist2 = nlist.rlist * nlist.rlist; ScalorType rcut2 = rcut * rcut; bool oneCellX(false), oneCellY(false), oneCellZ(false); if (clist.NCell.x == 1) oneCellX = true; if (clist.NCell.y == 1) oneCellY = true; if (clist.NCell.z == 1) oneCellZ = true; for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){ __syncthreads(); IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i); CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i); CoordType shift; shift.x = shiftNoi.x * box.size.x; shift.y = shiftNoi.y * box.size.y; shift.z = shiftNoi.z * box.size.z; targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid); if (targetIndexes[tid] != MaxIndexValue){ target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]); targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]); } __syncthreads(); // find neighbor if (ii != MaxIndexValue){ for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){ // if (targetIndexes[jj] == MaxIndexValue) break; ScalorType diffx = target[jj].x - shift.x - ref.x; ScalorType diffy = target[jj].y - shift.y - ref.y; ScalorType diffz = target[jj].z - shift.z - ref.z; if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx); if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy); if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz); ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz); IndexType fidx(0); fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[jj]); if (dr2 < rcut2 && targetIndexes[jj] != ii){ // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); fsumx += fx; fsumy += fy; fsumz += fz; // } } if (dr2 < rlist2 && targetIndexes[jj] != ii){ IndexType listIdx = Nneighbor * nlist.stride + ii; nlist.data[listIdx] = targetIndexes[jj]; nlist.forceIndex[listIdx] = fidx; Nneighbor ++; } } } } if (ii != MaxIndexValue){ forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; if (Nneighbor > nlist.listLength && ptr_de != NULL){ *ptr_de = mdErrorShortNeighborList; return; } nlist.Nneighbor[ii] = Nneighbor; } } __global__ void calNonBondedInteraction (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, DeviceCellList clist, const ScalorType rcut, DeviceNeighborList nlist, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); 
IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType bidx, bidy, bidz; D1toD3 (clist.NCell, bid, bidx, bidy, bidz); IndexType Nneighbor = 0; // load index IndexType ii = getDeviceCellListData (clist, bid, tid); // load iith coordinate // use texturefetch instead CoordType ref; TypeType reftype; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f); if (ii != MaxIndexValue){ #ifdef COMPILE_NO_TEX ref = coord[ii]; reftype = type[ii]; #else ref = tex1Dfetch (global_texRef_interaction_coord, ii); reftype = tex1Dfetch(global_texRef_interaction_type, ii); #endif } // ScalorType rlist = clist.rlist; // the target index and coordinates are shared extern __shared__ volatile char pub_sbuff[]; volatile IndexType * targetIndexes = (volatile IndexType *) pub_sbuff; CoordType * target = (CoordType *) &targetIndexes[roundUp4(blockDim.x)]; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; ScalorType rlist2 = nlist.rlist * nlist.rlist; ScalorType rcut2 = rcut * rcut; bool oneCellX(false), oneCellY(false), oneCellZ(false); if (clist.NCell.x == 1) oneCellX = true; if (clist.NCell.y == 1) oneCellY = true; if (clist.NCell.z == 1) oneCellZ = true; for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){ __syncthreads(); IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i); CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i); CoordType shift; shift.x = shiftNoi.x * box.size.x; shift.y = shiftNoi.y * box.size.y; shift.z = shiftNoi.z * box.size.z; targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid); if (targetIndexes[tid] != MaxIndexValue){ target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]); targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]); } __syncthreads(); // find neighbor if (ii != MaxIndexValue){ for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){ ScalorType diffx = target[jj].x - shift.x - ref.x; ScalorType diffy = target[jj].y - shift.y - ref.y; ScalorType diffz = target[jj].z - shift.z - ref.z; if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx); if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy); if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz); ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz); IndexType fidx(0); fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[jj]); if (dr2 < rcut2 && targetIndexes[jj] != ii){ // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; // } } if (dr2 < rlist2 && targetIndexes[jj] != ii){ IndexType listIdx = Nneighbor * nlist.stride + ii; nlist.data[listIdx] = targetIndexes[jj]; nlist.forceIndex[listIdx] = fidx; Nneighbor ++; } } } } if (ii != MaxIndexValue){ forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; if (Nneighbor > nlist.listLength && ptr_de != NULL){ *ptr_de = mdErrorShortNeighborList; return; } nlist.Nneighbor[ii] = Nneighbor; } } __global__ void 
calNonBondedInteraction_all (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const ScalorType rcut, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType numberAtom = numAtom; IndexType ii = tid + bid * blockDim.x; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); extern __shared__ volatile char pub_sbuff[]; volatile CoordType * target = (volatile CoordType *) pub_sbuff; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; __syncthreads(); CoordType ref; TypeType reftype; if (ii < numberAtom){ ref = coord[ii]; reftype = type[ii]; } ScalorType rcut2 = rcut * rcut; for (IndexType targetBlockId = 0; targetBlockId * blockDim.x < numberAtom; ++targetBlockId){ IndexType jj = tid + targetBlockId * blockDim.x; __syncthreads(); if (jj < numberAtom){ target[tid].x = coord[jj].x; target[tid].y = coord[jj].y; target[tid].z = coord[jj].z; targettype[tid] = type[jj]; } __syncthreads(); if (ii < numberAtom){ for (IndexType kk = 0; kk < blockDim.x; ++kk){ if (kk + targetBlockId * blockDim.x >= numberAtom) break; ScalorType diffx = target[kk].x - ref.x; ScalorType diffy = target[kk].y - ref.y; ScalorType diffz = target[kk].z - ref.z; shortestImage (box, &diffx, &diffy, &diffz); ScalorType dr2; if ((dr2 = diffx*diffx+diffy*diffy+diffz*diffz) < rcut2 && kk + targetBlockId * blockDim.x != ii){ IndexType fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[kk]); if (dr2 < rcut2 ) { // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); fsumx += fx; fsumy += fy; fsumz += fz; // } } } } } } if (ii < numberAtom){ forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } } __global__ void calNonBondedInteraction_all (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const ScalorType rcut, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType numberAtom = numAtom; IndexType ii = tid + bid * blockDim.x; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f); extern __shared__ volatile char pub_sbuff[]; volatile CoordType * target = (volatile CoordType *) pub_sbuff; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; __syncthreads(); CoordType ref; TypeType reftype; if (ii < numberAtom){ ref = coord[ii]; reftype = type[ii]; } ScalorType rcut2 = rcut * rcut; for (IndexType targetBlockId = 0; targetBlockId * blockDim.x < numberAtom; ++targetBlockId){ IndexType jj = tid + targetBlockId * blockDim.x; __syncthreads(); if (jj < numberAtom){ target[tid].x = coord[jj].x; target[tid].y = coord[jj].y; target[tid].z = coord[jj].z; targettype[tid] = type[jj]; } __syncthreads(); if (ii < numberAtom){ for (IndexType kk = 0; kk < blockDim.x; ++kk){ if (kk + targetBlockId * blockDim.x >= numberAtom) 
break; ScalorType diffx = target[kk].x - ref.x; ScalorType diffy = target[kk].y - ref.y; ScalorType diffz = target[kk].z - ref.z; shortestImage (box, &diffx, &diffy, &diffz); ScalorType dr2; if ((dr2 = diffx*diffx+diffy*diffy+diffz*diffz) < rcut2 && kk + targetBlockId * blockDim.x != ii){ IndexType fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[kk]); if (dr2 < rcut2 ) { // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; // } } } } } } if (ii < numberAtom){ forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; } } __global__ void calTwinRangeCorrection_cell (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const DeviceCellList clist, const ScalorType rcut1, const ScalorType rcut2, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType bidx, bidy, bidz; D1toD3 (clist.NCell, bid, bidx, bidy, bidz); // load index IndexType ii = getDeviceCellListData (clist, bid, tid); // load iith coordinate // use texturefetch instead CoordType ref; TypeType reftype; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f); if (ii != MaxIndexValue){ #ifdef COMPILE_NO_TEX ref = coord[ii]; reftype = type[ii]; #else ref = tex1Dfetch (global_texRef_interaction_coord, ii); reftype = tex1Dfetch(global_texRef_interaction_type, ii); #endif } // the target index and coordinates are shared extern __shared__ volatile char pub_sbuff[]; volatile IndexType * targetIndexes = (volatile IndexType *) pub_sbuff; CoordType * target = (CoordType *) &targetIndexes[roundUp4(blockDim.x)]; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; ScalorType rcut12 = rcut1 * rcut1; ScalorType rcut22 = rcut2 * rcut2; for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){ __syncthreads(); IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i); CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i); CoordType shift; shift.x = shiftNoi.x * box.size.x; shift.y = shiftNoi.y * box.size.y; shift.z = shiftNoi.z * box.size.z; targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid); if (targetIndexes[tid] != MaxIndexValue){ target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]); targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]); } bool oneCellX(false), oneCellY(false), oneCellZ(false); if (clist.NCell.x == 1) oneCellX = true; if (clist.NCell.y == 1) oneCellY = true; if (clist.NCell.z == 1) oneCellZ = true; __syncthreads(); // find neighbor if (ii != MaxIndexValue){ for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){ // if (targetIndexes[jj] == MaxIndexValue) break; ScalorType diffx = 
target[jj].x - shift.x - ref.x; ScalorType diffy = target[jj].y - shift.y - ref.y; ScalorType diffz = target[jj].z - shift.z - ref.z; if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx); if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy); if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz); ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz); if (dr2 < rcut22 && dr2 >= rcut12 && targetIndexes[jj] != ii){ IndexType fidx(0); fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[jj]); // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); // printf ("# %d\t%d\t%f\t%f\t%f\n", // ii, targetIndexes[jj], // ref.z, target[jj].z, fz); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; // } } } } } if (ii != MaxIndexValue){ forcx[ii] = fsumx; forcy[ii] = fsumy; forcz[ii] = fsumz; statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; } } __global__ void calTwinRangeCorrection_all (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const ScalorType rcut1, const ScalorType rcut2, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType numberAtom = numAtom; IndexType ii = tid + bid * blockDim.x; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f); extern __shared__ volatile char pub_sbuff[]; volatile CoordType * target = (volatile CoordType *) pub_sbuff; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; __syncthreads(); CoordType ref; TypeType reftype; if (ii < numberAtom){ ref = coord[ii]; reftype = type[ii]; } ScalorType rcut12 = rcut1 * rcut1; ScalorType rcut22 = rcut2 * rcut2; for (IndexType targetBlockId = 0; targetBlockId * blockDim.x < numberAtom; ++targetBlockId){ IndexType jj = tid + targetBlockId * blockDim.x; __syncthreads(); if (jj < numberAtom){ target[tid].x = coord[jj].x; target[tid].y = coord[jj].y; target[tid].z = coord[jj].z; targettype[tid] = type[jj]; } __syncthreads(); if (ii < numberAtom){ for (IndexType kk = 0; kk < blockDim.x; ++kk){ if (kk + targetBlockId * blockDim.x >= numberAtom) break; ScalorType diffx = target[kk].x - ref.x; ScalorType diffy = target[kk].y - ref.y; ScalorType diffz = target[kk].z - ref.z; shortestImage (box, &diffx, &diffy, &diffz); ScalorType dr2 = diffx*diffx+diffy*diffy+diffz*diffz; if (dr2 < rcut22 && dr2 >= rcut12 && kk + targetBlockId * blockDim.x != ii){ IndexType fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[kk]); // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * 
diffz; fsumx += fx; fsumy += fy; fsumz += fz; // } } } } } if (ii < numberAtom){ forcx[ii] = fsumx; forcy[ii] = fsumy; forcz[ii] = fsumz; statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; } } __global__ void buildNeighborListCalTwinRangeCorr_cell (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const DeviceCellList clist, const ScalorType rcut1, const ScalorType rcut2, DeviceNeighborList nlist, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType bidx, bidy, bidz; D1toD3 (clist.NCell, bid, bidx, bidy, bidz); // set number of neighbor to 0 IndexType Nneighbor = 0; // load index IndexType ii = getDeviceCellListData (clist, bid, tid); // load iith coordinate // use texturefetch instead CoordType ref; TypeType reftype; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f); if (ii != MaxIndexValue){ #ifdef COMPILE_NO_TEX ref = coord[ii]; reftype = type[ii]; #else ref = tex1Dfetch (global_texRef_interaction_coord, ii); reftype = tex1Dfetch(global_texRef_interaction_type, ii); #endif } // the target index and coordinates are shared extern __shared__ volatile char pub_sbuff[]; volatile IndexType * targetIndexes = (volatile IndexType *) pub_sbuff; CoordType * target = (CoordType *) &targetIndexes[roundUp4(blockDim.x)]; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; ScalorType rcut12 = rcut1 * rcut1; ScalorType rcut22 = rcut2 * rcut2; for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){ __syncthreads(); IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i); CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i); CoordType shift; shift.x = shiftNoi.x * box.size.x; shift.y = shiftNoi.y * box.size.y; shift.z = shiftNoi.z * box.size.z; targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid); if (targetIndexes[tid] != MaxIndexValue){ target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]); targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]); } bool oneCellX(false), oneCellY(false), oneCellZ(false); if (clist.NCell.x == 1) oneCellX = true; if (clist.NCell.y == 1) oneCellY = true; if (clist.NCell.z == 1) oneCellZ = true; __syncthreads(); // find neighbor if (ii != MaxIndexValue){ for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){ // if (targetIndexes[jj] == MaxIndexValue) break; ScalorType diffx = target[jj].x - shift.x - ref.x; ScalorType diffy = target[jj].y - shift.y - ref.y; ScalorType diffz = target[jj].z - shift.z - ref.z; if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx); if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy); if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz); ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz); if (targetIndexes[jj] != ii){ if (dr2 < rcut22 && dr2 >= rcut12 ){ IndexType fidx(0); fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[jj]); // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten 
(nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); // printf ("# %d\t%d\t%f\t%f\t%f\n", // ii, targetIndexes[jj], // ref.z, target[jj].z, fz); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; // } } else if (dr2 < rcut12){ IndexType fidx(0); fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[jj]); IndexType listIdx = Nneighbor * nlist.stride + ii; nlist.data[listIdx] = targetIndexes[jj]; nlist.forceIndex[listIdx] = fidx; Nneighbor ++; } } } } } if (ii != MaxIndexValue){ forcx[ii] = fsumx; forcy[ii] = fsumy; forcz[ii] = fsumz; statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; if (Nneighbor > nlist.listLength && ptr_de != NULL){ *ptr_de = mdErrorShortNeighborList; } nlist.Nneighbor[ii] = Nneighbor; } } __global__ void buildNeighborListCalTwinRangeCorr_all (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const ScalorType rcut1, const ScalorType rcut2, DeviceNeighborList nlist, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType numberAtom = numAtom; IndexType Nneighbor = 0; IndexType ii = tid + bid * blockDim.x; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f); extern __shared__ volatile char pub_sbuff[]; volatile CoordType * target = (volatile CoordType *) pub_sbuff; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; __syncthreads(); CoordType ref; TypeType reftype; if (ii < numberAtom){ ref = coord[ii]; reftype = type[ii]; } ScalorType rcut12 = rcut1 * rcut1; ScalorType rcut22 = rcut2 * rcut2; for (IndexType targetBlockId = 0; targetBlockId * blockDim.x < numberAtom; ++targetBlockId){ IndexType jj = tid + targetBlockId * blockDim.x; __syncthreads(); if (jj < numberAtom){ target[tid].x = coord[jj].x; target[tid].y = coord[jj].y; target[tid].z = coord[jj].z; targettype[tid] = type[jj]; } __syncthreads(); if (ii < numberAtom){ for (IndexType kk = 0; kk < blockDim.x; ++kk){ if (kk + targetBlockId * blockDim.x >= numberAtom) break; ScalorType diffx = target[kk].x - ref.x; ScalorType diffy = target[kk].y - ref.y; ScalorType diffz = target[kk].z - ref.z; shortestImage (box, &diffx, &diffy, &diffz); ScalorType dr2 = diffx*diffx+diffy*diffy+diffz*diffz; if (kk + targetBlockId * blockDim.x != ii){ if (dr2 < rcut22 && dr2 >= rcut12 ){ IndexType fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[kk]); // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; // } } else if (dr2 < rcut12){ IndexType fidx = AtomNBForceTable::calForceIndex ( 
const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[kk]); IndexType listIdx = Nneighbor * nlist.stride + ii; nlist.data[listIdx] = kk + targetBlockId * blockDim.x; nlist.forceIndex[listIdx] = fidx; Nneighbor ++; } } } } } if (ii < numberAtom){ forcx[ii] = fsumx; forcy[ii] = fsumy; forcz[ii] = fsumz; statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; if (Nneighbor > nlist.listLength && ptr_de != NULL){ *ptr_de = mdErrorShortNeighborList; } nlist.Nneighbor[ii] = Nneighbor; } } __global__ void widomDeltaPoten_NVT (const IndexType numTestParticle, const CoordType * coordTestParticle, const TypeType * typeTestParticle, const IndexType numAtom, const CoordType * coord, const TypeType * type, const RectangularBox box, DeviceCellList clist, ScalorType * statistic_nb_buff0, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; // IndexType ii = tid + bid * blockDim.x; if (bid >= numTestParticle) return; // extern __shared__ volatile char pub_sbuff_widom[]; // volatile ScalorType * sumbuff = (volatile ScalorType *) pub_sbuff_widom; extern __shared__ volatile ScalorType sumbuff []; CoordType refCoord = coordTestParticle[bid]; TypeType refType = typeTestParticle[bid]; ScalorType myPoten (0.0f); IndexType refCelli, refCellj, refCellk; refCelli = IndexType (refCoord.x * box.sizei.x * ScalorType(clist.NCell.x)); refCellj = IndexType (refCoord.y * box.sizei.y * ScalorType(clist.NCell.y)); refCellk = IndexType (refCoord.z * box.sizei.z * ScalorType(clist.NCell.z)); if (refCelli == clist.NCell.x){ refCelli -= clist.NCell.x; refCoord.x -= box.size.x; } if (refCellj == clist.NCell.y){ refCellj -= clist.NCell.y; refCoord.y -= box.size.y; } if (refCellk == clist.NCell.z){ refCellk -= clist.NCell.z; refCoord.z -= box.size.z; } IndexType refCellIndex = D3toD1 (clist.NCell, refCelli, refCellj, refCellk); for (IndexType i = 0; i < clist.numNeighborCell[refCellIndex]; ++i){ __syncthreads (); IndexType targetCellIdx = getNeighborCellIndex (clist, refCellIndex, i); CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, refCellIndex, i); CoordType shift; shift.x = shiftNoi.x * box.size.x; shift.y = shiftNoi.y * box.size.y; shift.z = shiftNoi.z * box.size.z; IndexType targetIndex = getDeviceCellListData(clist, targetCellIdx, tid); if (targetIndex != MaxIndexValue){ TypeType targettype = tex1Dfetch(global_texRef_interaction_type, targetIndex); if (refType == targettype){ CoordType targetCoord = tex1Dfetch(global_texRef_interaction_coord, targetIndex); ScalorType diffx = targetCoord.x - shift.x - refCoord.x; ScalorType diffy = targetCoord.y - shift.y - refCoord.y; ScalorType diffz = targetCoord.z - shift.z - refCoord.z; ScalorType dr2 = ((diffx*diffx+diffy*diffy+diffz*diffz)); if (dr2 < clist.rlist*clist.rlist && dr2 > 1e-4){ IndexType fidx(0); ScalorType dp; fidx = AtomNBForceTable:: calForceIndex (const_nonBondedInteractionTable, const_numAtomType[0], refType, refType); nbPoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &dp); myPoten += dp; // printf ("dp: %f, %f %f %f\n", dp, diffx, diffy, diffz); } } } } sumbuff[tid] = myPoten; __syncthreads(); sumVectorBlockBuffer_2 (sumbuff); __syncthreads(); if (tid == 0){ statistic_nb_buff0[bid] = sumbuff[0]; } } // if (tid == 0){ // // printf ("### du is 
%f\n", sumbuff[0]); // statistic_nb_buff0[bid] = expf(- (sumbuff[0] + energyCorrection) / temperature); // } // } __global__ void widomDeltaPoten_allPair_NVT (const IndexType numTestParticle, const CoordType * coordTestParticle, const TypeType * typeTestParticle, const IndexType numAtom, const CoordType * coord, const TypeType * type, const RectangularBox box, const ScalorType rlist, ScalorType * statistic_nb_buff0, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; // IndexType ii = tid + bid * blockDim.x; if (bid >= numTestParticle) return; CoordType refCoord = coordTestParticle[bid]; TypeType refType = typeTestParticle[bid]; ScalorType myPoten = 0.; extern __shared__ volatile ScalorType sumbuff []; for (IndexType start = 0; start < numAtom; start += blockDim.x){ IndexType targetIndex = start + tid; if (targetIndex >= numAtom) break; TypeType targetType = type[targetIndex]; if (targetType != refType) continue; CoordType targetCoord = coord[targetIndex]; ScalorType diffx = targetCoord.x - refCoord.x; ScalorType diffy = targetCoord.y - refCoord.y; ScalorType diffz = targetCoord.z - refCoord.z; RectangularBoxGeometry::shortestImage (box, &diffx, &diffy, &diffz); ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz); if (dr2 < rlist * rlist && dr2 > 1e-4 ){ IndexType fidx(0); ScalorType dp; fidx = AtomNBForceTable:: calForceIndex (const_nonBondedInteractionTable, const_numAtomType[0], refType, refType); nbPoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &dp); myPoten += dp; } } sumbuff[tid] = myPoten; __syncthreads(); sumVectorBlockBuffer_2 (sumbuff); __syncthreads(); if (tid == 0){ statistic_nb_buff0[bid] = sumbuff[0]; } } // if (tid == 0){ // // printf ("### du is %f\n", sumbuff[0]); // statistic_nb_buff0[bid] = expf(- (sumbuff[0] + energyCorrection) / temperature); // } // }
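The kernels above rely on shortestImage() for the minimum-image convention and on sumVectorBlockBuffer_2() for the per-block energy reduction; neither helper is defined in this file. The following self-contained sketch illustrates only the minimum-image pattern those kernels assume, using placeholder names (minimumImage, countPairsWithinCutoff) and a plain float3 box in place of RectangularBox; it is an illustration under those assumptions, not part of either source file.

#include <cuda_runtime.h>

__device__ __forceinline__ void
minimumImage (float  boxL,   // box edge length
              float  boxLi,  // 1 / boxL
              float *diff)   // one component of r_j - r_i
{
  // shift the difference by an integer number of box lengths so that
  // it ends up in [-boxL/2, boxL/2]
  *diff -= boxL * rintf (*diff * boxLi);
}

__global__ void
countPairsWithinCutoff (int            n,
                        const float3 * coord,
                        float3         boxSize,
                        float          rcut,
                        unsigned     * count)
{
  int i = blockIdx.x * blockDim.x + threadIdx.x;
  if (i >= n) return;
  float3 boxSizei = make_float3 (1.f / boxSize.x, 1.f / boxSize.y, 1.f / boxSize.z);
  float rcut2 = rcut * rcut;
  unsigned my = 0;
  for (int j = 0; j < n; ++j){
    if (j == i) continue;
    float dx = coord[j].x - coord[i].x;
    float dy = coord[j].y - coord[i].y;
    float dz = coord[j].z - coord[i].z;
    minimumImage (boxSize.x, boxSizei.x, &dx);
    minimumImage (boxSize.y, boxSizei.y, &dy);
    minimumImage (boxSize.z, boxSizei.z, &dz);
    if (dx*dx + dy*dy + dz*dz < rcut2) ++my;
  }
  // every pair is counted from both atoms, mirroring the double visit that the
  // 0.5f weighting compensates for in the kernels above
  atomicAdd (count, my);
}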
60463dc8a16918a8f90e774fca2f529367037ca0.cu
#define DEVICE_CODE #include "systemDefines.h" #include "InteractionEngine_interface.h" #include "NonBondedInteraction.h" #include "BondInteraction.h" #include "AngleInteraction.h" // #include "CellList_interface.h" #include "Auxiliary.h" texture<CoordType, 1, cudaReadModeElementType> global_texRef_interaction_coord; texture<TypeType , 1, cudaReadModeElementType> global_texRef_interaction_type; __constant__ InteractionType nonBondedInteractionType [MaxNumberNonBondedInteraction]; __constant__ ScalorType nonBondedInteractionParameter [MaxNumberNonBondedInteractionParameter]; __constant__ IndexType nonBondedInteractionParameterPosition [MaxNumberNonBondedInteraction]; __constant__ InteractionType bondedInteractionType [MaxNumberBondedInteraction]; __constant__ IndexType bondedInteractionParameterPosition [MaxNumberBondedInteraction]; __constant__ ScalorType bondedInteractionParameter [MaxNumberBondedInteractionParamemter]; __constant__ IndexType const_nonBondedInteractionTableLength[1]; __constant__ IndexType const_numAtomType[1]; __constant__ IndexType const_nonBondedInteractionTable [MaxLengthNonBondedInteractionTable]; void InteractionEngine::init (const MDSystem & sys, const IndexType & NTread) { hasBond = false; hasAngle = false; myBlockDim.y = 1; myBlockDim.z = 1; myBlockDim.x = NTread; IndexType nob; if (sys.ddata.numAtom % myBlockDim.x == 0){ nob = sys.ddata.numAtom / myBlockDim.x; } else { nob = sys.ddata.numAtom / myBlockDim.x + 1; } atomGridDim = toGridDim (nob); // size_t sizetype = sizeof(TypeType)*sys.ddata.numMem; cudaBindTexture(0, global_texRef_interaction_coord, sys.ddata.coord, sizeof(CoordType) * sys.ddata.numMem); cudaBindTexture(0, global_texRef_interaction_type, sys.ddata.type, sizeof(TypeType) * sys.ddata.numMem); checkCUDAError ("InteractionEngine::init, bind texture"); // init sum vectors sum_nb_p.reinit (sys.ddata.numAtom, NThreadForSum); sum_nb_vxx.reinit (sys.ddata.numAtom, NThreadForSum); sum_nb_vyy.reinit (sys.ddata.numAtom, NThreadForSum); sum_nb_vzz.reinit (sys.ddata.numAtom, NThreadForSum); sum_b_p.reinit (nob, NThreadForSum); sum_b_vxx.reinit (nob, NThreadForSum); sum_b_vyy.reinit (nob, NThreadForSum); sum_b_vzz.reinit (nob, NThreadForSum); sum_angle_p.reinit (nob, NThreadForSum); sum_angle_vxx.reinit (nob, NThreadForSum); sum_angle_vyy.reinit (nob, NThreadForSum); sum_angle_vzz.reinit (nob, NThreadForSum); for (IndexType i = 0; i < 8; ++i){ cudaStreamCreate(&sum_stream[i]); } checkCUDAError ("InteractionEngine::init init sum statistic"); // exclusion list maxNumExclusion = 0; sharedExclusionList = false; exclusion_sbuffSize = size_t(0); } static IndexType hroundUp4 (const IndexType x) { if (x & 3 == 0){ return x; } else { return ((x >> 2) + 1) << 2; } } void InteractionEngine:: registNonBondedInteraction (const SystemNonBondedInteraction & sysNbInter) { if (! 
sysNbInter.beBuilt()) { throw MDExcptUnbuiltNonBondedInteraction ("InteractionEngine"); } if (sysNbInter.numberOfInteraction() > MaxNumberBondedInteraction ){ throw MDExcptExceedConstantMemLimit ( "InteractionEngine::registNonBondedInteraction", "nonBonedInteractionType", MaxNumberNonBondedInteraction * sizeof(InteractionType)); } if (sysNbInter.numberOfParameter() > MaxNumberNonBondedInteractionParameter ){ throw MDExcptExceedConstantMemLimit ( "InteractionEngine::registNonBondedInteraction", "nonBondedInteractionParameter", MaxNumberNonBondedInteractionParameter * sizeof(ScalorType)); } cudaMemcpyToSymbol (nonBondedInteractionType, sysNbInter.interactionType(), sizeof(InteractionType) * sysNbInter.numberOfInteraction()); cudaMemcpyToSymbol (nonBondedInteractionParameterPosition, sysNbInter.interactionParameterPosition(), sizeof(ScalorType) * sysNbInter.numberOfInteraction()); cudaMemcpyToSymbol (nonBondedInteractionParameter, sysNbInter.interactionParameter(), sizeof(IndexType) * sysNbInter.numberOfParameter()); checkCUDAError ("InteractionEngine::init, init NB force setting"); IndexType tableSize = sysNbInter.interactionTableSize(); IndexType tmpNumAtomType = sysNbInter.numberOfAtomTypes(); if (tableSize > MaxLengthNonBondedInteractionTable){ throw MDExcptExceedConstantMemLimit( "InteractionEngine::registNonBondedInteraction", "nonBondedInteractionTable", MaxLengthNonBondedInteractionTable * sizeof (ScalorType)); } cudaMemcpyToSymbol (const_nonBondedInteractionTableLength, &tableSize, sizeof (IndexType)); checkCUDAError ("InteractionEngine::init, const_nonBondedInteractionTableLength"); cudaMemcpyToSymbol (const_numAtomType, &tmpNumAtomType, sizeof (IndexType)); checkCUDAError ("InteractionEngine::init, const_numAtomType"); cudaMemcpyToSymbol (const_nonBondedInteractionTable, sysNbInter.interactionTable(), sizeof (IndexType) * tableSize); checkCUDAError ("InteractionEngine::init, const_nonBondedInteractionTable"); // applyNonBondedInteraction_CellList_sbuffSize = // sizeof(IndexType) * hroundUp4(myBlockDim.x) + // sizeof(CoordType) * hroundUp4(myBlockDim.x) + // sizeof(TypeType) * hroundUp4(myBlockDim.x); // printf ("total %d\npart1 %d\npart2 %d\npart3 %d\nround %d\n", // applyNonBondedInteraction_CellList_sbuffSize, // sizeof(IndexType) * hroundUp4(myBlockDim.x), // sizeof(CoordType) * hroundUp4(myBlockDim.x), // sizeof(TypeType) * hroundUp4(myBlockDim.x), // hroundUp4(myBlockDim.x)); // checkCUDAError ("InteractionEngine::init, init nonBondedInteractionTable"); energyCorr = sysNbInter.energyCorrection (); pressureCorr = sysNbInter.pressureCorrection (); maxNumExclusion = sysNbInter.maxNumberOfExclusion(); if (maxNumExclusion != 0){ sharedExclusionList = true; exclusion_sbuffSize = myBlockDim.x * maxNumExclusion * sizeof(IndexType); if (exclusion_sbuffSize > SystemSharedBuffSize){ sharedExclusionList = false; } } } void InteractionEngine:: registBondedInteraction (const SystemBondedInteraction & sysBdInter) { if (sysBdInter.hasBond() ){ hasBond = true; } if (sysBdInter.hasAngle()){ hasAngle = true; } if (sysBdInter.numberOfInteraction() > MaxNumberBondedInteraction ){ throw MDExcptExceedConstantMemLimit ( "InteractionEngine::registBondedInteraction", "bondedInteractionType", MaxNumberBondedInteraction * sizeof(InteractionType)); } if (sysBdInter.numberOfParameter() > MaxNumberBondedInteractionParamemter ){ throw MDExcptExceedConstantMemLimit ( "InteractionEngine::registBondedInteraction", "bondedInteractionParameter", MaxNumberBondedInteractionParamemter * sizeof(ScalorType)); } if 
(hasBond || hasAngle){ cudaMemcpyToSymbol (bondedInteractionType, sysBdInter.interactionType(), sizeof(InteractionType) * sysBdInter.numberOfInteraction()); cudaMemcpyToSymbol (bondedInteractionParameterPosition, sysBdInter.interactionParameterPosition(), sizeof(ScalorType) * sysBdInter.numberOfInteraction()); cudaMemcpyToSymbol (bondedInteractionParameter, sysBdInter.interactionParameter(), sizeof(IndexType) * sysBdInter.numberOfParameter()); checkCUDAError ("InteractionEngine::init, init bond force setting"); // cal shared buff size calBondInteraction_sbuffSize = myBlockDim.x * sizeof(ScalorType); calAngleInteraction_sbuffSize = myBlockDim.x * sizeof(ScalorType); } } InteractionEngine::~InteractionEngine() { cudaUnbindTexture(global_texRef_interaction_coord); cudaUnbindTexture(global_texRef_interaction_type); for (IndexType i = 0; i < 8; ++i){ cudaStreamDestroy(sum_stream[i]); } } void InteractionEngine::clearInteraction (MDSystem & sys) { clearForce <<<atomGridDim, myBlockDim>>>( sys.ddata.numAtom, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz); checkCUDAError ("InteractionEngine::clearInteraction"); } // nblock should be 1 and block size should be 1 __global__ void applyEnergyPressureCorrection (ScalorType * ddata, ScalorType energyCorr, ScalorType pressureCorr) { ddata[mdStatisticEnergyCorrection] = energyCorr; ddata[mdStatisticPressureCorrection] = pressureCorr; } void InteractionEngine:: applyNonBondedInteraction (MDSystem & sys, const NeighborList & nlist, const ExclusionList * excllist, MDTimer *timer ) { if (timer != NULL) timer->tic(mdTimeNonBondedInteraction); if (excllist == NULL){ calNonBondedInteraction_neighbor <<<atomGridDim, myBlockDim>>> ( sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.ddata.type, sys.ddata.rcut, sys.box, nlist.dnlist); } else{ calNonBondedInteraction_neighbor <<<atomGridDim, myBlockDim, exclusion_sbuffSize>>> ( sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.ddata.type, sys.box, nlist.dnlist, excllist->dexcllist, sharedExclusionList ); } checkCUDAError ("InteractionEngine::applyInteraction nb"); err.check ("interaction engine nb"); if (timer != NULL) timer->toc(mdTimeNonBondedInteraction); } // void InteractionEngine:: // applyNonBondedInteraction (MDSystem & sys, // const CellList & clist, // const ScalorType & rcut, // NeighborList & nlist, // MDTimer *timer ) // { // if (timer != NULL) timer->tic(mdTimeBuildNeighborList); // size_t applyNonBondedInteraction_CellList_sbuffSize = // (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) * // hroundUp4(clist.getBlockDim().x); // // sizeof(IndexType) * hroundUp4(myBlockDim.x) + // // sizeof(CoordType) * hroundUp4(myBlockDim.x) + // // sizeof(TypeType) * hroundUp4(myBlockDim.x); // calNonBondedInteraction // <<<clist.getCellGrimDim(), clist.getBlockDim(), // applyNonBondedInteraction_CellList_sbuffSize>>> ( // sys.ddata.numAtom, // sys.ddata.coord, // sys.ddata.forcx, // sys.ddata.forcy, // sys.ddata.forcz, // sys.ddata.type, // sys.box, // clist.dclist, // rcut, // nlist.dnlist, // err.ptr_de); // checkCUDAError ("InteractionEngine::applyInteraction nb"); // err.check ("interaction engine nb"); // if (timer != NULL) timer->toc(mdTimeBuildNeighborList); // } void InteractionEngine:: applyNonBondedInteraction (MDSystem & sys, const NeighborList & nlist, MDStatistic & st, const ExclusionList * excllist, MDTimer *timer ) { if (timer != NULL) timer->tic(mdTimeNBInterStatistic); if (excllist == NULL){ 
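// Dispatch: without an exclusion list the plain neighbor-list kernel is launched;
// with one, the exclusion-aware variant is launched with exclusion_sbuffSize bytes
// of dynamic shared memory so each thread can cache its own exclusion entries
// (sharedExclusionList tells the kernel whether that shared cache is actually used).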
calNonBondedInteraction_neighbor <<<atomGridDim, myBlockDim>>> ( sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.ddata.type, sys.ddata.rcut, sys.box, nlist.dnlist , sum_nb_p.buff, sum_nb_vxx.buff, sum_nb_vyy.buff, sum_nb_vzz.buff ); } else { calNonBondedInteraction_neighbor <<<atomGridDim, myBlockDim, exclusion_sbuffSize>>> ( sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.ddata.type, sys.box, nlist.dnlist , excllist->dexcllist, sharedExclusionList, sum_nb_p.buff, sum_nb_vxx.buff, sum_nb_vyy.buff, sum_nb_vzz.buff ); } checkCUDAError ("InteractionEngine::applyInteraction nb (with statistic)"); err.check ("interaction engine nb"); cudaThreadSynchronize(); sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0); sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX); sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY); sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ); ScalorType volumei = sys.box.size.x * sys.box.size.y * sys.box.size.z; volumei = 1.f / volumei; // printf ("apply Ec %f, Pc %f\n", // energyCorr * volumei, // pressureCorr * volumei * volumei); applyEnergyPressureCorrection <<<1, 1, 0>>> (st.ddata, energyCorr * volumei, pressureCorr * volumei * volumei); cudaThreadSynchronize(); if (timer != NULL) timer->toc(mdTimeNBInterStatistic); } void InteractionEngine:: applyNonBondedInteraction (MDSystem & sys, const CellList & clist, const ScalorType & rcut, MDTimer *timer ) { if (!clist.isempty()){ if (timer != NULL) timer->tic(mdTimeNonBondedInteraction); size_t applyNonBondedInteraction_CellList_sbuffSize = (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) * hroundUp4(clist.getCellBlockDim().x); calNonBondedInteraction_cell <<<clist.getCellGrimDim(), clist.getCellBlockDim(), applyNonBondedInteraction_CellList_sbuffSize>>> ( sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.ddata.type, sys.box, clist.dclist, rcut, err.ptr_de); checkCUDAError ("InteractionEngine::applyInteraction nb"); err.check ("interaction engine nb"); if (timer != NULL) timer->toc(mdTimeNonBondedInteraction); } else { applyNonBondedInteraction (sys, rcut, timer); } } void InteractionEngine:: applyNonBondedInteraction (MDSystem & sys, const CellList & clist, const ScalorType & rcut, MDStatistic & st, MDTimer *timer ) { if (!clist.isempty()){ if (timer != NULL) timer->tic(mdTimeNBInterStatistic); size_t applyNonBondedInteraction_CellList_sbuffSize = (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) * hroundUp4(clist.getCellBlockDim().x); calNonBondedInteraction_cell <<<clist.getCellGrimDim(), clist.getCellBlockDim(), applyNonBondedInteraction_CellList_sbuffSize>>> ( sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.ddata.type, sys.box, clist.dclist, rcut, sum_nb_p.buff, sum_nb_vxx.buff, sum_nb_vyy.buff, sum_nb_vzz.buff, err.ptr_de ); checkCUDAError ("InteractionEngine::applyInteraction nb (with statistic)"); err.check ("interaction engine nb"); cudaThreadSynchronize(); sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0); sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX); sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY); sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ); ScalorType volumei = sys.box.size.x * sys.box.size.y * sys.box.size.z; volumei = 1.f / volumei; // printf ("apply Ec %f, Pc %f\n", // energyCorr * volumei, // pressureCorr * volumei * volumei); applyEnergyPressureCorrection <<<1, 1, 0>>> 
(st.ddata, energyCorr * volumei, pressureCorr * volumei * volumei); cudaThreadSynchronize(); if (timer != NULL) timer->toc(mdTimeNBInterStatistic); } else { applyNonBondedInteraction (sys, rcut, st, timer); } } void InteractionEngine:: applyNonBondedInteraction (MDSystem & sys, const ScalorType & rcut, MDTimer *timer ) { if (timer != NULL) timer->tic(mdTimeNonBondedInteraction); size_t applyNonBondedInteraction_AllPair_sbuffSize = (sizeof(CoordType) + sizeof(TypeType)) * hroundUp4(myBlockDim.x); calNonBondedInteraction_all <<<atomGridDim, myBlockDim, applyNonBondedInteraction_AllPair_sbuffSize>>> ( sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.ddata.type, sys.box, rcut, err.ptr_de); checkCUDAError ("InteractionEngine::applyInteraction nb"); err.check ("interaction engine nb"); if (timer != NULL) timer->toc(mdTimeNonBondedInteraction); } void InteractionEngine:: applyNonBondedInteraction (MDSystem & sys, const ScalorType & rcut, MDStatistic & st, MDTimer *timer ) { if (timer != NULL) timer->tic(mdTimeNBInterStatistic); size_t applyNonBondedInteraction_AllPair_sbuffSize = (sizeof(CoordType) + sizeof(TypeType)) * hroundUp4(myBlockDim.x); calNonBondedInteraction_all <<<atomGridDim, myBlockDim, applyNonBondedInteraction_AllPair_sbuffSize>>> ( sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.ddata.type, sys.box, rcut, sum_nb_p.buff, sum_nb_vxx.buff, sum_nb_vyy.buff, sum_nb_vzz.buff, err.ptr_de); checkCUDAError ("InteractionEngine::applyInteraction nb (with statistic)"); err.check ("interaction engine nb"); cudaThreadSynchronize(); sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0); sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX); sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY); sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ); ScalorType volumei = sys.box.size.x * sys.box.size.y * sys.box.size.z; volumei = 1.f / volumei; // printf ("apply Ec %f, Pc %f\n", // energyCorr * volumei, // pressureCorr * volumei * volumei); applyEnergyPressureCorrection <<<1, 1, 0>>> (st.ddata, energyCorr * volumei, pressureCorr * volumei * volumei); cudaThreadSynchronize(); if (timer != NULL) timer->toc(mdTimeNBInterStatistic); } // void InteractionEngine:: // applyNonBondedInteraction (MDSystem & sys, // const CellList & clist, // const ScalorType & rcut, // NeighborList & nlist, // MDStatistic & st, // MDTimer *timer ) // { // if (timer != NULL) timer->tic(mdTimeBuildNeighborList); // if (!clist.isempty()){ // size_t applyNonBondedInteraction_CellList_sbuffSize = // (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) * // hroundUp4(clist.getBlockDim().x); // calNonBondedInteraction // <<<clist.getCellGrimDim(), clist.getBlockDim(), // applyNonBondedInteraction_CellList_sbuffSize>>> ( // sys.ddata.numAtom, // sys.ddata.coord, // sys.ddata.forcx, // sys.ddata.forcy, // sys.ddata.forcz, // sys.ddata.type, // sys.box, // clist.dclist, // rcut, // nlist.dnlist, // sum_nb_p.buff, // sum_nb_vxx.buff, // sum_nb_vyy.buff, // sum_nb_vzz.buff, // err.ptr_de // ); // } // checkCUDAError ("InteractionEngine::applyInteraction nb (with statistic)"); // err.check ("interaction engine nb"); // cudaThreadSynchronize(); // sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0); // sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX, 1); // sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY, 2); // sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ, 3); // ScalorType volumei = sys.box.size.x * 
sys.box.size.y * sys.box.size.z; // volumei = 1.f / volumei; // // printf ("apply Ec %f, Pc %f\n", // // energyCorr * volumei, // // pressureCorr * volumei * volumei); // applyEnergyPressureCorrection // <<<1, 1, 0, 4>>> (st.ddata, // energyCorr * volumei, // pressureCorr * volumei * volumei); // cudaThreadSynchronize(); // if (timer != NULL) timer->toc(mdTimeBuildNeighborList); // } void InteractionEngine:: calTwinRangeCorrection (const MDSystem & sys, const CellList & clist, const ScalorType & rcut1, const ScalorType & rcut2, TwinRangeCorrectionRecorder & twrec, MDTimer * timer) { if (timer != NULL) timer->tic(mdTimeNBInterTwinRange); if (clist.isempty()){ size_t applyNonBondedInteraction_AllPair_sbuffSize = (sizeof(CoordType) + sizeof(TypeType)) * hroundUp4(myBlockDim.x); calTwinRangeCorrection_all <<<atomGridDim, myBlockDim, applyNonBondedInteraction_AllPair_sbuffSize>>> ( sys.ddata.numAtom, sys.ddata.coord, twrec.forcx, twrec.forcy, twrec.forcz, sys.ddata.type, sys.box, rcut1, rcut2, sum_nb_p.buff, sum_nb_vxx.buff, sum_nb_vyy.buff, sum_nb_vzz.buff, err.ptr_de); } else { size_t applyNonBondedInteraction_CellList_sbuffSize = (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) * hroundUp4(clist.getCellBlockDim().x); calTwinRangeCorrection_cell <<<clist.getCellGrimDim(), clist.getCellBlockDim(), applyNonBondedInteraction_CellList_sbuffSize>>> ( sys.ddata.numAtom, sys.ddata.coord, twrec.forcx, twrec.forcy, twrec.forcz, sys.ddata.type, sys.box, clist.dclist, rcut1, rcut2, sum_nb_p.buff, sum_nb_vxx.buff, sum_nb_vyy.buff, sum_nb_vzz.buff, err.ptr_de); } checkCUDAError ("TwinRangeCorrectionRecorder::calTwinRangeCorrection"); err.check ("TwinRangeCorrectionRecorder::calTwinRangeCorrection"); cudaThreadSynchronize(); MDStatistic st (sys); sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0); sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX); sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY); sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ); cudaThreadSynchronize(); st.updateHost (); twrec.energyCorrection() = st.nonBondedEnergy(); twrec.pressureCorrection() = st.pressure(sys.box); if (timer != NULL) timer->toc(mdTimeNBInterTwinRange); } void InteractionEngine:: buildNeighborListCalTwinRangeCorrection (const MDSystem & sys, const CellList & clist, const ScalorType & rcut1, const ScalorType & rcut2, NeighborList & nlist, TwinRangeCorrectionRecorder & twrec, MDTimer * timer) { if (timer != NULL) timer->tic(mdTimeBuildNeighborList); if (clist.isempty()){ size_t applyNonBondedInteraction_AllPair_sbuffSize = (sizeof(CoordType) + sizeof(TypeType)) * hroundUp4(myBlockDim.x); buildNeighborListCalTwinRangeCorr_all <<<atomGridDim, myBlockDim, applyNonBondedInteraction_AllPair_sbuffSize>>> ( sys.ddata.numAtom, sys.ddata.coord, twrec.forcx, twrec.forcy, twrec.forcz, sys.ddata.type, sys.box, rcut1, rcut2, nlist.dnlist, sum_nb_p.buff, sum_nb_vxx.buff, sum_nb_vyy.buff, sum_nb_vzz.buff, err.ptr_de); } else { size_t applyNonBondedInteraction_CellList_sbuffSize = (sizeof(IndexType) + sizeof(CoordType) + sizeof(TypeType)) * hroundUp4(clist.getCellBlockDim().x); buildNeighborListCalTwinRangeCorr_cell <<<clist.getCellGrimDim(), clist.getCellBlockDim(), applyNonBondedInteraction_CellList_sbuffSize>>> ( sys.ddata.numAtom, sys.ddata.coord, twrec.forcx, twrec.forcy, twrec.forcz, sys.ddata.type, sys.box, clist.dclist, rcut1, rcut2, nlist.dnlist, sum_nb_p.buff, sum_nb_vxx.buff, sum_nb_vyy.buff, sum_nb_vzz.buff, err.ptr_de); } checkCUDAError 
("TwinRangeCorrectionRecorder::calTwinRangeCorrection"); err.check ("TwinRangeCorrectionRecorder::calTwinRangeCorrection"); cudaThreadSynchronize(); MDStatistic st (sys); sum_nb_p.sumBuffAdd(st.ddata, mdStatisticNonBondedPotential, 0); sum_nb_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX); sum_nb_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY); sum_nb_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ); cudaThreadSynchronize(); st.updateHost (); twrec.energyCorrection() = st.nonBondedEnergy(); twrec.pressureCorrection() = st.pressure(sys.box); if (timer != NULL) timer->toc(mdTimeBuildNeighborList); } void InteractionEngine:: applyBondedInteraction (MDSystem & sys, const BondedInteractionList & bdlist, MDTimer *timer ) { if (hasBond) { if (timer != NULL) timer->tic(mdTimeBondedInteraction); calBondInteraction <<<atomGridDim, myBlockDim>>> ( sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.box, bdlist.dbondlist); checkCUDAError ("InteractionEngine::applyInteraction bonded"); err.check ("interaction engine b"); if (timer != NULL) timer->toc(mdTimeBondedInteraction); } if (hasAngle){ if (timer != NULL) timer->tic(mdTimeAngleInteraction); calAngleInteraction <<<atomGridDim, myBlockDim>>> ( sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.box, bdlist.danglelist); checkCUDAError ("InteractionEngine::applyInteraction angle"); err.check ("interaction engine angle"); if (timer != NULL) timer->toc(mdTimeAngleInteraction); } } void InteractionEngine:: applyBondedInteraction (MDSystem & sys, const BondedInteractionList & bdlist, MDStatistic & st, MDTimer *timer) { if (hasBond) { if (timer != NULL) timer->tic(mdTimeBInterStatistic); calBondInteraction <<<atomGridDim, myBlockDim, calBondInteraction_sbuffSize>>> ( sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.box, bdlist.dbondlist , sum_b_p.buff, sum_b_vxx.buff, sum_b_vyy.buff, sum_b_vzz.buff, err.ptr_de ); checkCUDAError ("InteractionEngine::applyInteraction bonded (with statistic)"); err.check ("interaction engine"); if (timer != NULL) timer->toc(mdTimeBInterStatistic); } if (hasBond) { if (timer != NULL) timer->tic(mdTimeBInterStatistic); cudaThreadSynchronize(); sum_b_p.sumBuffAdd(st.ddata, mdStatisticBondedPotential); sum_b_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX); sum_b_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY); sum_b_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ); cudaThreadSynchronize(); if (timer != NULL) timer->toc(mdTimeBInterStatistic); checkCUDAError ("InteractionEngine::applyInteraction sum bond statistic (with statistic)"); } if (hasAngle){ if (timer != NULL) timer->tic(mdTimeAngleInterStatistic); calAngleInteraction <<<atomGridDim, myBlockDim, calAngleInteraction_sbuffSize>>> ( sys.ddata.numAtom, sys.ddata.coord, sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz, sys.box, bdlist.danglelist, sum_angle_p.buff, sum_angle_vxx.buff, sum_angle_vyy.buff, sum_angle_vzz.buff, err.ptr_de); checkCUDAError ("InteractionEngine::applyInteraction angle"); err.check ("interaction engine angle"); if (timer != NULL) timer->toc(mdTimeAngleInterStatistic); } if (hasAngle){ if (timer != NULL) timer->tic(mdTimeAngleInterStatistic); sum_angle_p.sumBuffAdd(st.ddata, mdStatisticBondedPotential); sum_angle_vxx.sumBuffAdd(st.ddata, mdStatisticVirialXX); sum_angle_vyy.sumBuffAdd(st.ddata, mdStatisticVirialYY); sum_angle_vzz.sumBuffAdd(st.ddata, mdStatisticVirialZZ); cudaThreadSynchronize(); if (timer != NULL) 
timer->toc(mdTimeAngleInterStatistic); checkCUDAError ("InteractionEngine::applyInteraction sum angle statistic (with statistic)"); } } // void InteractionEngine:: // calculateWidomDeltaEnergy (const MDSystem & sys, // const NeighborList & nlist, // WidomTestParticleInsertion_NVT & wtest, // MDTimer * timer ) // { // if (timer != NULL) timer->tic(mdTimeNBInterStatistic); // // printf ("### %d\n", nlist.mode); // if (nlist.mode == CellListBuilt){ // // printf ("### here %f\n", wtest.energyCorrection()); // widomDeltaPoten_NVT // <<<toGridDim(wtest.numTestParticle()), // nlist.myBlockDim.x, // nlist.myBlockDim.x * sizeof(ScalorType)>>> ( // wtest.numTestParticle(), // wtest.coordTestParticle, // wtest.typeTestParticle, // sys.ddata.numAtom, // sys.ddata.coord, // sys.ddata.type, // sys.box, // nlist.dclist, // wtest.sumExpDeltaU.buff, // err.ptr_de); // } // else if (nlist.mode == AllPairBuilt){ // // printf ("### here %f\n", wtest.energyCorrection()); // widomDeltaPoten_allPair_NVT // <<<toGridDim(wtest.numTestParticle()), // DefaultNThreadPerBlock, // DefaultNThreadPerBlock * sizeof(ScalorType)>>> ( // wtest.numTestParticle(), // wtest.coordTestParticle, // wtest.typeTestParticle, // sys.ddata.numAtom, // sys.ddata.coord, // sys.ddata.type, // sys.box, // nlist.myrlist, // wtest.sumExpDeltaU.buff, // err.ptr_de); // } // if (timer != NULL) timer->toc(mdTimeNBInterStatistic); // } // void InteractionEngine:: // calculateWidomDeltaEnergy (const MDSystem & sys, // const NeighborList & nlist, // WidomTestParticleInsertion_NVT2 & wtest, // MDTimer * timer ) // { // if (timer != NULL) timer->tic(mdTimeNBInterStatistic); // // printf ("### %d\n", nlist.mode); // if (nlist.mode == CellListBuilt){ // // printf ("### here %f\n", wtest.energyCorrection()); // widomDeltaPoten_NVT // <<<toGridDim(wtest.numTestParticle()), // nlist.myBlockDim.x, // nlist.myBlockDim.x * sizeof(ScalorType)>>> ( // wtest.numTestParticle(), // wtest.coordTestParticle, // wtest.typeTestParticle, // sys.ddata.numAtom, // sys.ddata.coord, // sys.ddata.type, // sys.box, // nlist.dclist, // wtest.sumExpDeltaU.buff, // err.ptr_de); // } // if (timer != NULL) timer->toc(mdTimeNBInterStatistic); // } // void InteractionEngine:: // calculateWidomDeltaEnergy (const MDSystem & sys, // const NeighborList & nlist, // WidomTestParticleInsertion_NPT & wtest, // MDTimer * timer ) // { // if (timer != NULL) timer->tic(mdTimeNBInterStatistic); // // printf ("### %d\n", nlist.mode); // if (nlist.mode == CellListBuilt){ // // printf ("### here %f, n: %d\n", wtest.energyCorrection(), wtest.numTestParticle()); // widomDeltaPoten_NVT // <<<toGridDim(wtest.numTestParticle()), // nlist.myBlockDim.x, // nlist.myBlockDim.x * sizeof(ScalorType)>>> ( // wtest.numTestParticle(), // wtest.coordTestParticle, // wtest.typeTestParticle, // sys.ddata.numAtom, // sys.ddata.coord, // sys.ddata.type, // sys.box, // nlist.dclist, // wtest.sumExpDeltaU.buff, // err.ptr_de); // } // else if (nlist.mode == AllPairBuilt){ // // printf ("### here %f\n", wtest.energyCorrection()); // widomDeltaPoten_allPair_NVT // <<<toGridDim(wtest.numTestParticle()), // DefaultNThreadPerBlock, // DefaultNThreadPerBlock * sizeof(ScalorType)>>> ( // wtest.numTestParticle(), // wtest.coordTestParticle, // wtest.typeTestParticle, // sys.ddata.numAtom, // sys.ddata.coord, // sys.ddata.type, // sys.box, // nlist.myrlist, // wtest.sumExpDeltaU.buff, // err.ptr_de); // } // // for (unsigned i = 0; i < wtest.numTestParticle(); ++i){ // // printf ("%d %f (%f %f %f)\n", i, // // 
wtest.sumExpDeltaU.buff[i], // // wtest.coordTestParticle[i].x, // // wtest.coordTestParticle[i].y, // // wtest.coordTestParticle[i].z // // ); // // } // if (timer != NULL) timer->toc(mdTimeNBInterStatistic); // } __global__ void clearForce (const IndexType numAtom, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType ii = threadIdx.x + bid * blockDim.x; if (ii < numAtom) { forcx[ii] = 0.0f; forcy[ii] = 0.0f; forcz[ii] = 0.0f; } } // __global__ void // calNonBondedInteraction (const CoordType * coord, // const TypeType * type, // DeviceCellListData clist, // DeviceCellListProperty clistPro, // ScalorType * forcx, // ScalorType * forcy, // ScalorType * forcz, // bool sharednbForceTable) // { // IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; // IndexType tid = threadIdx.x; // ScalorType fsumx(0.f), fsumy(0.f), fsumz(0.f); // extern __shared__ volatile char pub_sbuff[]; // volatile IndexType * targetIndex = (volatile IndexType *) pub_sbuff; // CoordType * targetCoord = (CoordType *) &targetIndex[roundUp4(blockDim.x)]; // volatile TypeType * targetType = (volatile TypeType *) &targetCoord[roundUp4(blockDim.x)]; // __syncthreads(); // IndexType ii = get (clist, bid, tid); // CoordType ref; // TypeType refType; // if (ii != MaxIndexValue){ // ref = tex1Dfetch (global_texRef_interaction_coord, ii); // refType = tex1Dfetch(global_texRef_interaction_type, ii); // } // for (unsigned i = 0; i < numNeighborCell(clistPro, bid); ++i){ // __syncthreads(); // IndexType targetCellIndex = getTargetCellIndex (clistPro, bid, i); // CoordType shift = getShiftValue (clistPro, bid, i); // IndexType targetIndex[tid] = get (clist, targetCellIndex, tid); // if (targetIndex[tid] != MaxIndexValue){ // targetCoord[tid] = tex1Dfetch (global_texRef_interaction_coord, targetIndexes[tid]); // targetType[tid] = tex1Dfetch (global_texRef_interaction_type, targetIndexes[tid]); // } // __syncthreads (); // if (ii != MaxIndexValue){ // for (IndexType jj = 0; jj < blockDim.x; ++jj){ // if (targetIndex[jj] == MaxIndexValue) continue; // ScalorType diffx = targetCoord[jj].x + shift.x - ref.x; // ScalorType diffy = targetCoord[jj].y + shift.y - ref.y; // ScalorType diffz = targetCoord[jj].z + shift.z - ref.z; // if ((diffx*diffx+diffy*diffy+diffz*diffz) < rlist2 && // targetIndex[jj] != ii){ // ForceIndexType fidx; // if (sharednbForceTable){ // fidx = nonBondedInteractionTableItem ( // nonBondedInteractionTable, const_numAtomType, refType, targetType[jj]); // } // else { // fidx = nonBondedInteractionTableItem ( // nonBondedInteractionTable, const_numAtomType, refType, targetType[jj]); // } // ScalorType fx, fy, fz; // nbforce (nonBondedInteractionType[fidx], // &nonBondedInteractionParameter // [nonBondedInteractionParameterPosition[fidx]], // diffx, diffy, diffz, // &fx, &fy, &fz); // fsumx += fx; // fsumy += fy; // fsumz += fz; // } // } // } // } // if (ii != MaxIndexValue){ // forcx[ii] += fsumx; // forcy[ii] += fsumy; // forcz[ii] += fsumz; // } // } __global__ void calNonBondedInteraction_neighbor (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const ScalorType * rcut, const RectangularBox box, const DeviceNeighborList nlist) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; ScalorType fsumx = 0.0f; ScalorType fsumy = 0.0f; ScalorType fsumz = 0.0f; IndexType ii = tid + bid * blockDim.x; if (ii < numAtom) { CoordType ref 
(tex1Dfetch(global_texRef_interaction_coord, ii)); ScalorType refrcut2 = rcut[ii]; refrcut2 = refrcut2 * refrcut2; ScalorType fx(0.f), fy(0.f), fz(0.f); for (IndexType jj = 0, nlistPosi = ii; jj < nlist.Nneighbor[ii]; ++jj, nlistPosi += nlist.stride){ IndexType targetIdx ( nlist.data [nlistPosi] ); IndexType nbForceIndex ( nlist.forceIndex [nlistPosi] ); CoordType target ( tex1Dfetch(global_texRef_interaction_coord, targetIdx) ); ScalorType diffx ( target.x - ref.x ); ScalorType diffy ( target.y - ref.y ); ScalorType diffz ( target.z - ref.z ); shortestImage (box, &diffx, &diffy, &diffz); ScalorType rcut2 = target.w * target.w; if (rcut2 < refrcut2) rcut2 = refrcut2; if (diffx*diffx + diffy*diffy + diffz*diffz < rcut2){ // if (diffx*diffx + diffy*diffy + diffz*diffz < refrcut2){ nbForce (nonBondedInteractionType[nbForceIndex], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[nbForceIndex]], diffx, diffy, diffz, &fx, &fy, &fz); fsumx += fx; fsumy += fy; fsumz += fz; } } forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } } __global__ void calNonBondedInteraction_neighbor (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const DeviceNeighborList nlist, const DeviceExclusionList dexcllist, const bool sharedExclusionList) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; ScalorType fsumx = 0.0f; ScalorType fsumy = 0.0f; ScalorType fsumz = 0.0f; IndexType ii = tid + bid * blockDim.x; IndexType * ptr_excllist; IndexType myNumExclusion (0); extern __shared__ char excl_sbuff[]; if (dexcllist.maxNumExclusion != 0 && ii < numAtom){ myNumExclusion = dexcllist.numExclusion[ii]; if (sharedExclusionList){ ptr_excllist = (IndexType *) excl_sbuff; for (IndexType jj = 0; jj < myNumExclusion; ++jj){ ptr_excllist[jj*blockDim.x+tid] = dexcllist.exclusionNeighborIndex[jj*dexcllist.stride+ii]; } } } if (ii < numAtom) { CoordType ref = tex1Dfetch(global_texRef_interaction_coord, ii); ScalorType fx(0.f), fy(0.f), fz(0.f); for (IndexType jj = 0, nlistPosi = ii; jj < nlist.Nneighbor[ii]; ++jj, nlistPosi += nlist.stride){ IndexType targetIdx ( nlist.data [nlistPosi] ); IndexType nbForceIndex; CoordType target; ScalorType diffx, diffy, diffz; if (sharedExclusionList){ for (IndexType kk = 0; kk < myNumExclusion; ++kk){ if (ptr_excllist[kk*blockDim.x+tid] == targetIdx) { goto skipInter; } } } else { for (IndexType kk = 0; kk < myNumExclusion; ++kk){ if (dexcllist.exclusionNeighborIndex[kk*dexcllist.stride+ii] == targetIdx) { goto skipInter; } } } nbForceIndex = ( nlist.forceIndex [nlistPosi] ); target = ( tex1Dfetch(global_texRef_interaction_coord, targetIdx) ); diffx = ( target.x - ref.x ); diffy = ( target.y - ref.y ); diffz = ( target.z - ref.z ); shortestImage (box, &diffx, &diffy, &diffz); nbForce (nonBondedInteractionType[nbForceIndex], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[nbForceIndex]], diffx, diffy, diffz, &fx, &fy, &fz); fsumx += fx; fsumy += fy; fsumz += fz; skipInter: { } } forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } } __global__ void calNonBondedInteraction_neighbor (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const ScalorType * rcut, const RectangularBox box, const DeviceNeighborList nlist, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, 
ScalorType * statistic_nb_buff3) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; ScalorType fsumx = 0.0f; ScalorType fsumy = 0.0f; ScalorType fsumz = 0.0f; IndexType ii = tid + bid * blockDim.x; ScalorType myPoten = 0.0f, myVxx = 0.0f, myVyy = 0.0f, myVzz = 0.0f; if (ii < numAtom) { CoordType ref; ref = tex1Dfetch(global_texRef_interaction_coord, ii); ScalorType refrcut2 = rcut[ii]; refrcut2 = refrcut2 * refrcut2; ScalorType fx(0.f), fy(0.f), fz(0.f); ScalorType dp; for (IndexType jj = 0, nlistPosi = ii; jj < nlist.Nneighbor[ii]; ++jj, nlistPosi += nlist.stride){ IndexType targetIdx ( nlist.data[nlistPosi] ); IndexType nbForceIndex ( nlist.forceIndex [nlistPosi] ); CoordType target ( tex1Dfetch(global_texRef_interaction_coord, targetIdx) ); ScalorType diffx ( target.x - ref.x ); ScalorType diffy ( target.y - ref.y ); ScalorType diffz ( target.z - ref.z ); shortestImage (box, &diffx, &diffy, &diffz); ScalorType rcut2 = target.w * target.w; if (rcut2 < refrcut2) rcut2 = refrcut2; if (diffx*diffx + diffy*diffy + diffz*diffz < rcut2){ // if (diffx*diffx + diffy*diffy + diffz*diffz < refrcut2){ nbForcePoten (nonBondedInteractionType[nbForceIndex], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[nbForceIndex]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); // printf ("## %d\t%d\t%f\t%f\t%f\n", // ii, targetIdx, // ref.z, target.z, fz); // printf ("%f, %f %f %f, %f %f %f, %f %f %f, %f\n", // sqrtf(diffx*diffx+diffy*diffy+diffz*diffz), // ref.x, ref.y, ref.z, // target.x, target.y, target.z, // diffx, diffy, diffz, // dp // ); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; } } forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } if (ii < numAtom){ statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; } } __global__ void calNonBondedInteraction_neighbor (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const DeviceNeighborList nlist, const DeviceExclusionList dexcllist, const bool sharedExclusionList, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; ScalorType fsumx = 0.0f; ScalorType fsumy = 0.0f; ScalorType fsumz = 0.0f; IndexType ii = tid + bid * blockDim.x; ScalorType myPoten = 0.0f, myVxx = 0.0f, myVyy = 0.0f, myVzz = 0.0f; IndexType * ptr_excllist; IndexType myNumExclusion (0); extern __shared__ char excl_sbuff[]; if (dexcllist.maxNumExclusion != 0 && ii < numAtom){ myNumExclusion = dexcllist.numExclusion[ii]; if (sharedExclusionList){ ptr_excllist = (IndexType *) excl_sbuff; for (IndexType jj = 0; jj < myNumExclusion; ++jj){ ptr_excllist[jj*blockDim.x+tid] = dexcllist.exclusionNeighborIndex[jj*dexcllist.stride+ii]; } } } if (ii < numAtom) { CoordType ref; ref = tex1Dfetch(global_texRef_interaction_coord, ii); ScalorType fx(0.f), fy(0.f), fz(0.f); ScalorType dp; for (IndexType jj = 0, nlistPosi = ii; jj < nlist.Nneighbor[ii]; ++jj, nlistPosi += nlist.stride){ IndexType targetIdx ( nlist.data[nlistPosi] ); IndexType nbForceIndex; CoordType target; ScalorType diffx, diffy, diffz; if (sharedExclusionList){ for (IndexType kk = 0; kk < myNumExclusion; ++kk){ if (ptr_excllist[kk*blockDim.x+tid] 
== targetIdx) { goto skipInter; } } } else { for (IndexType kk = 0; kk < myNumExclusion; ++kk){ if (dexcllist.exclusionNeighborIndex[kk*dexcllist.stride+ii] == targetIdx) { goto skipInter; } } } nbForceIndex = ( nlist.forceIndex [nlistPosi] ); target = ( tex1Dfetch(global_texRef_interaction_coord, targetIdx) ); diffx = ( target.x - ref.x ); diffy = ( target.y - ref.y ); diffz = ( target.z - ref.z ); shortestImage (box, &diffx, &diffy, &diffz); nbForcePoten (nonBondedInteractionType[nbForceIndex], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[nbForceIndex]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); // printf ("## %d\t%d\t%f\t%f\t%f\n", // ii, targetIdx, // ref.z, target.z, fz); // printf ("%f, %f %f %f, %f %f %f, %f %f %f, %f\n", // sqrtf(diffx*diffx+diffy*diffy+diffz*diffz), // ref.x, ref.y, ref.z, // target.x, target.y, target.z, // diffx, diffy, diffz, // dp // ); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; skipInter: { } } forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } if (ii < numAtom){ statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; } } __global__ void calBondInteraction (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const RectangularBox box, const DeviceBondList bdlist) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; ScalorType fsumx = 0.0f; ScalorType fsumy = 0.0f; ScalorType fsumz = 0.0f; IndexType ii = tid + bid * blockDim.x; if (ii >= numAtom) return; CoordType ref; #ifdef COMPILE_NO_TEX ref = coord[ii]; #else ref = tex1Dfetch(global_texRef_interaction_coord, ii); #endif IndexType myNumBond = bdlist.numBond[ii]; for (IndexType jj = 0; jj < bdlist.maxNumBond; ++jj){ if (jj == myNumBond) break; IndexType targetIdx = bdlist.bondNeighborIndex[jj * bdlist.stride + ii]; CoordType target; #ifdef COMPILE_NO_TEX target = coord[targetIdx]; #else target = tex1Dfetch(global_texRef_interaction_coord, targetIdx); #endif ScalorType diffx, diffy, diffz; diffx = target.x - ref.x; diffy = target.y - ref.y; diffz = target.z - ref.z; shortestImage (box, &diffx, &diffy, &diffz); ScalorType fx, fy, fz; IndexType bondFindex = bdlist.bondIndex[jj * bdlist.stride + ii]; bondForce (bondedInteractionType[bondFindex], &bondedInteractionParameter [bondedInteractionParameterPosition[bondFindex]], diffx, diffy, diffz, &fx, &fy, &fz); fsumx += fx; fsumy += fy; fsumz += fz; } forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } __global__ void calBondInteraction (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const RectangularBox box, const DeviceBondList bdlist, ScalorType * statistic_b_buff0, ScalorType * statistic_b_buff1, ScalorType * statistic_b_buff2, ScalorType * statistic_b_buff3, mdError_t * ptr_de) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; extern __shared__ volatile ScalorType buff[]; buff[tid] = 0.f; __syncthreads(); ScalorType fsumx = 0.0f; ScalorType fsumy = 0.0f; ScalorType fsumz = 0.0f; IndexType ii = tid + bid * blockDim.x; ScalorType myPoten = 0.0f, myVxx = 0.0f, myVyy = 0.0f, myVzz = 0.0f; if (ii < numAtom) { CoordType ref; #ifdef COMPILE_NO_TEX ref = coord[ii]; #else ref = tex1Dfetch(global_texRef_interaction_coord, ii); #endif IndexType myNumBond = 
bdlist.numBond[ii]; for (IndexType jj = 0; jj < bdlist.maxNumBond; ++jj){ if (jj == myNumBond) break; IndexType targetIdx = bdlist.bondNeighborIndex[jj * bdlist.stride + ii]; CoordType target; #ifdef COMPILE_NO_TEX target = coord[targetIdx]; #else target = tex1Dfetch(global_texRef_interaction_coord, targetIdx); #endif ScalorType diffx, diffy, diffz; diffx = target.x - ref.x; diffy = target.y - ref.y; diffz = target.z - ref.z; shortestImage (box, &diffx, &diffy, &diffz); ScalorType fx, fy, fz; IndexType bondFindex = bdlist.bondIndex[jj * bdlist.stride + ii]; ScalorType dp; bondForcePoten (bondedInteractionType[bondFindex], &bondedInteractionParameter [bondedInteractionParameterPosition[bondFindex]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; } forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } buff[tid] = myPoten * 0.5f; sumVectorBlockBuffer_2 (buff); if (threadIdx.x == 0) statistic_b_buff0[bid] = buff[0]; __syncthreads(); buff[tid] = myVxx * 0.5f; sumVectorBlockBuffer_2 (buff); if (threadIdx.x == 0) statistic_b_buff1[bid] = buff[0]; __syncthreads(); buff[tid] = myVyy * 0.5f; sumVectorBlockBuffer_2 (buff); if (threadIdx.x == 0) statistic_b_buff2[bid] = buff[0]; __syncthreads(); buff[tid] = myVzz * 0.5f; sumVectorBlockBuffer_2 (buff); if (threadIdx.x == 0) statistic_b_buff3[bid] = buff[0]; __syncthreads(); } __global__ void calAngleInteraction (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const RectangularBox box, const DeviceAngleList anglelist) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; ScalorType fsumx = 0.0f; ScalorType fsumy = 0.0f; ScalorType fsumz = 0.0f; IndexType ii = tid + bid * blockDim.x; IndexType myNumAngle; if (ii < numAtom){ myNumAngle = anglelist.numAngle[ii]; } else { myNumAngle = 0; return ; } // if (__all(myNumAngle == 0)) return ; if (ii < numAtom) { CoordType ref; #ifdef COMPILE_NO_TEX ref = coord[ii]; #else ref = tex1Dfetch(global_texRef_interaction_coord, ii); #endif for (IndexType jj = 0; jj < myNumAngle; ++jj){ IndexType targetIdx0 = anglelist.angleNeighborIndex[((jj<<1) ) * anglelist.stride + ii]; IndexType targetIdx1 = anglelist.angleNeighborIndex[((jj<<1)+1) * anglelist.stride + ii]; IndexType myPosi = anglelist.anglePosi[jj * anglelist.stride + ii]; CoordType target0, target1; #ifdef COMPILE_NO_TEX target0 = coord[targetIdx0]; target1 = coord[targetIdx1]; #else target0 = tex1Dfetch(global_texRef_interaction_coord, targetIdx0); target1 = tex1Dfetch(global_texRef_interaction_coord, targetIdx1); #endif ScalorType diff0x, diff0y, diff0z; ScalorType diff1x, diff1y, diff1z; bool center (myPosi == 1); if (center){ diff0x = ref.x - target0.x; diff0y = ref.y - target0.y; diff0z = ref.z - target0.z; diff1x = target1.x - ref.x; diff1y = target1.y - ref.y; diff1z = target1.z - ref.z; } else { diff0x = target0.x - ref.x; diff0y = target0.y - ref.y; diff0z = target0.z - ref.z; diff1x = target1.x - target0.x; diff1y = target1.y - target0.y; diff1z = target1.z - target0.z; } shortestImage (box, &diff0x, &diff0y, &diff0z); shortestImage (box, &diff1x, &diff1y, &diff1z); ScalorType f0x, f0y, f0z; ScalorType f1x, f1y, f1z; IndexType angleFindex = anglelist.angleIndex[jj * anglelist.stride + ii]; angleForce (center, bondedInteractionType[angleFindex], &bondedInteractionParameter [bondedInteractionParameterPosition[angleFindex]], diff0x, diff0y, diff0z, 
diff1x, diff1y, diff1z, &f0x, &f0y, &f0z, &f1x, &f1y, &f1z); if (center){ fsumx += f0x + f1x; fsumy += f0y + f1y; fsumz += f0z + f1z; } else { fsumx -= f0x; fsumy -= f0y; fsumz -= f0z; } } forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } } __global__ void calAngleInteraction (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const RectangularBox box, const DeviceAngleList anglelist, ScalorType * statistic_b_buff0, ScalorType * statistic_b_buff1, ScalorType * statistic_b_buff2, ScalorType * statistic_b_buff3, mdError_t * ptr_de) { IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; ScalorType fsumx = 0.0f; ScalorType fsumy = 0.0f; ScalorType fsumz = 0.0f; ScalorType myPoten = 0.0f, myVxx = 0.0f, myVyy = 0.0f, myVzz = 0.0f; IndexType ii = tid + bid * blockDim.x; IndexType myNumAngle; extern __shared__ volatile ScalorType buff[]; buff[tid] = 0.f; __syncthreads(); if (ii < numAtom) { CoordType ref; #ifdef COMPILE_NO_TEX ref = coord[ii]; #else ref = tex1Dfetch(global_texRef_interaction_coord, ii); #endif myNumAngle = anglelist.numAngle[ii]; for (IndexType jj = 0; jj < myNumAngle; ++jj){ IndexType targetIdx0 = anglelist.angleNeighborIndex[((jj<<1) ) * anglelist.stride + ii]; IndexType targetIdx1 = anglelist.angleNeighborIndex[((jj<<1)+1) * anglelist.stride + ii]; IndexType myPosi = anglelist.anglePosi[jj * anglelist.stride + ii]; CoordType target0, target1; #ifdef COMPILE_NO_TEX target0 = coord[targetIdx0]; target1 = coord[targetIdx1]; #else target0 = tex1Dfetch(global_texRef_interaction_coord, targetIdx0); target1 = tex1Dfetch(global_texRef_interaction_coord, targetIdx1); #endif ScalorType diff0x, diff0y, diff0z; ScalorType diff1x, diff1y, diff1z; bool center = (myPosi == 1); if (center){ diff0x = ref.x - target0.x; diff0y = ref.y - target0.y; diff0z = ref.z - target0.z; diff1x = target1.x - ref.x; diff1y = target1.y - ref.y; diff1z = target1.z - ref.z; } else { diff0x = target0.x - ref.x; diff0y = target0.y - ref.y; diff0z = target0.z - ref.z; diff1x = target1.x - target0.x; diff1y = target1.y - target0.y; diff1z = target1.z - target0.z; } shortestImage (box, &diff0x, &diff0y, &diff0z); shortestImage (box, &diff1x, &diff1y, &diff1z); ScalorType f0x, f0y, f0z; ScalorType f1x, f1y, f1z; IndexType angleFindex = anglelist.angleIndex[jj * anglelist.stride + ii]; ScalorType dp; angleForcePoten (center, bondedInteractionType[angleFindex], &bondedInteractionParameter [bondedInteractionParameterPosition[angleFindex]], diff0x, diff0y, diff0z, diff1x, diff1y, diff1z, &f0x, &f0y, &f0z, &f1x, &f1y, &f1z, &dp); myPoten += dp; if (center){ fsumx += f0x + f1x; fsumy += f0y + f1y; fsumz += f0z + f1z; myVxx -= f0x * diff0x - f1x * diff1x; myVyy -= f0y * diff0y - f1y * diff1y; myVzz -= f0z * diff0z - f1z * diff1z; } else { fsumx -= f0x; fsumy -= f0y; fsumz -= f0z; } } forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } buff[tid] = myPoten * 0.33333333333333333f; sumVectorBlockBuffer_2 (buff); if (threadIdx.x == 0) statistic_b_buff0[bid] = buff[0]; __syncthreads(); buff[tid] = myVxx; sumVectorBlockBuffer_2 (buff); if (threadIdx.x == 0) statistic_b_buff1[bid] = buff[0]; __syncthreads(); buff[tid] = myVyy; sumVectorBlockBuffer_2 (buff); if (threadIdx.x == 0) statistic_b_buff2[bid] = buff[0]; __syncthreads(); buff[tid] = myVzz; sumVectorBlockBuffer_2 (buff); if (threadIdx.x == 0) statistic_b_buff3[bid] = buff[0]; __syncthreads(); } // static __device__ IndexType shiftedD3toD1 ( // DeviceCellList clist, // 
RectangularBoxGeometry::RectangularBox box, // int ix, int iy, int iz, // ScalorType * shiftx , ScalorType * shifty, ScalorType * shiftz) // { // int tmp; // ix += (tmp = -int(floorf(ix * clist.NCelli.x))) * clist.NCell.x; // *shiftx = tmp * box.size.x; // iy += (tmp = -int(floorf(iy * clist.NCelli.y))) * clist.NCell.y; // *shifty = tmp * box.size.y; // iz += (tmp = -int(floorf(iz * clist.NCelli.z))) * clist.NCell.z; // *shiftz = tmp * box.size.z; // return D3toD1 (clist.NCell, ix, iy, iz); // } // __global__ void calNonBondedInteraction ( // const IndexType numAtom, // const CoordType * coord, // ScalorType * forcx, // ScalorType * forcy, // ScalorType * forcz, // const TypeType * type, // const RectangularBox box, // DeviceCellList clist, // mdError_t * ptr_de) // { // // RectangularBoxGeometry::normalizeSystem (box, &ddata); // IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; // IndexType tid = threadIdx.x; // IndexType bidx, bidy, bidz; // D1toD3 (clist.NCell, bid, bidx, bidy, bidz); // // load index // IndexType ii = getDeviceCellListData (clist, bid, tid); // // load iith coordinate // use texturefetch instead // CoordType ref; // TypeType reftype; // ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); // if (ii != MaxIndexValue){ // #ifdef COMPILE_NO_TEX // ref = coord[ii]; // reftype = type[ii]; // #else // ref = tex1Dfetch (global_texRef_interaction_coord, ii); // // reftype = tex1Dfetch(global_texRef_interaction_type, ii); // #endif // } // ScalorType rlist = clist.rlist; // // the target index and coordinates are shared // extern __shared__ volatile char pub_sbuff[]; // volatile IndexType * targetIndexes = // (volatile IndexType *) pub_sbuff; // CoordType * target = // (CoordType *) &targetIndexes[roundUp4(blockDim.x)]; // volatile TypeType * targettype = // (volatile TypeType *) &target[roundUp4(blockDim.x)]; // __syncthreads(); // // bool oneCellX(false), oneCellY(false), oneCellZ(false); // // if (clist.NCell.x == 1) oneCellX = true; // // if (clist.NCell.y == 1) oneCellY = true; // // if (clist.NCell.z == 1) oneCellZ = true; // // int upperx(1), lowerx(-1); // // int uppery(1), lowery(-1); // // int upperz(1), lowerz(-1); // // if (oneCellX) {lowerx = 0; upperx = 0;} // // if (oneCellY) {lowery = 0; uppery = 0;} // // if (oneCellZ) {lowerz = 0; upperz = 0;} // ScalorType rlist2 = rlist * rlist; // // loop over 27 neighbor cells // #pragma unroll 3 // // for (int nci = bidx + lowerx; nci <= bidx + upperx; ++nci){ // // for (int ncj = bidy + lowery; ncj <= bidy + uppery; ++ncj){ // // for (int nck = bidz + lowerz; nck <= bidz + upperz; ++nck){ // for (int nci = int(bidx) - 1; nci <= int(bidx) + 1; ++nci){ // for (int ncj = int(bidy) - 1; ncj <= int(bidy) + 1; ++ncj){ // for (int nck = int(bidz) - 1; nck <= int(bidz) + 1; ++nck){ // // for (int di = lowerx; di <= upperx; ++di){ // // for (int dj = lowery; dj <= uppery; ++dj){ // // for (int dk = lowerz; dk <= upperz; ++dk){ // __syncthreads(); // // the shift value of a cell is pre-computed // ScalorType xshift, yshift, zshift; // // int nci = di + bidx; // // int ncj = dj + bidy; // // int nck = dk + bidz; // IndexType targetCellIdx = shiftedD3toD1 (clist, box, // nci, ncj, nck, // &xshift, &yshift, &zshift); // // load target index and coordinates form global memary // // IndexType tmp = (targetIndexes[tid] = // // getDeviceCellListData(clist, targetCellIdx, tid)); // targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid); // if (targetIndexes[tid] != MaxIndexValue){ // // #ifdef COMPILE_NO_TEX // // 
target[tid] = coord[tmp]; // // // targettype[tid] = type[tmp]; // // #else // target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]); // // targettype[tid] = tex1Dfetch(global_texRef_interaction_type, tmp); // // #endif // } // __syncthreads(); // // find neighbor // if (ii != MaxIndexValue){ // for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){ // // if (targetIndexes[jj] == MaxIndexValue) break; // ScalorType diffx = target[jj].x - xshift - ref.x; // ScalorType diffy = target[jj].y - yshift - ref.y; // ScalorType diffz = target[jj].z - zshift - ref.z; // // if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx); // // if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy); // // if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz); // //printf ("%d\t%d\t%f\t%f\n", ii, // ScalorType dr2; // if ((dr2 = (diffx*diffx+diffy*diffy+diffz*diffz)) < rlist2 && // targetIndexes[jj] != ii){ // IndexType fidx(0); // // fidx = AtomNBForceTable::calForceIndex ( // // nonBondedInteractionTable, // // const_numAtomType[0], // // reftype, // // targettype[jj]); // // if (fidx != mdForceNULL) { // ScalorType fx, fy, fz; // nbForce (nonBondedInteractionType[fidx], // &nonBondedInteractionParameter // [nonBondedInteractionParameterPosition[fidx]], // diffx, diffy, diffz, // dr2, // &fx, &fy, &fz); // fsumx += fx; // fsumy += fy; // fsumz += fz; // // } // } // } // } // } // } // } // if (ii != MaxIndexValue){ // forcx[ii] += fsumx; // forcy[ii] += fsumy; // forcz[ii] += fsumz; // } // } // __global__ void calNonBondedInteraction ( // const IndexType numAtom, // const CoordType * coord, // ScalorType * forcx, // ScalorType * forcy, // ScalorType * forcz, // const TypeType * type, // const RectangularBox box, // DeviceCellList clist, // ScalorType * statistic_nb_buff0, // ScalorType * statistic_nb_buff1, // ScalorType * statistic_nb_buff2, // ScalorType * statistic_nb_buff3, // mdError_t * ptr_de) // { // // RectangularBoxGeometry::normalizeSystem (box, &ddata); // IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; // IndexType tid = threadIdx.x; // IndexType bidx, bidy, bidz; // D1toD3 (clist.NCell, bid, bidx, bidy, bidz); // // load index // IndexType ii = getDeviceCellListData (clist, bid, tid); // // load iith coordinate // use texturefetch instead // CoordType ref; // TypeType reftype; // ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); // ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f); // if (ii != MaxIndexValue){ // #ifdef COMPILE_NO_TEX // ref = coord[ii]; // reftype = type[ii]; // #else // ref = tex1Dfetch (global_texRef_interaction_coord, ii); // reftype = tex1Dfetch(global_texRef_interaction_type, ii); // #endif // } // ScalorType rlist = clist.rlist; // // the target index and coordinates are shared // extern __shared__ volatile char pub_sbuff[]; // volatile IndexType * targetIndexes = // (volatile IndexType *) pub_sbuff; // CoordType * target = // (CoordType *) &targetIndexes[roundUp4(blockDim.x)]; // volatile TypeType * targettype = // (volatile TypeType *) &target[roundUp4(blockDim.x)]; // __syncthreads(); // // bool oneCellX(false), oneCellY(false), oneCellZ(false); // // if (clist.NCell.x == 1) oneCellX = true; // // if (clist.NCell.y == 1) oneCellY = true; // // if (clist.NCell.z == 1) oneCellZ = true; // // int upperx(1), lowerx(-1); // // int uppery(1), lowery(-1); // // int upperz(1), lowerz(-1); // // if (oneCellX) {lowerx = 0; upperx = 0;} // // if (oneCellY) {lowery = 0; uppery = 0;} // // if (oneCellZ) 
{lowerz = 0; upperz = 0;} // ScalorType rlist2 = rlist * rlist; // // loop over 27 neighbor cells // #pragma unroll 3 // for (int nci = int(bidx) - 1; nci <= int(bidx) + 1; ++nci){ // for (int ncj = int(bidy) - 1; ncj <= int(bidy) + 1; ++ncj){ // for (int nck = int(bidz) - 1; nck <= int(bidz) + 1; ++nck){ // // for (int di = lowerx; di <= upperx; ++di){ // // for (int dj = lowery; dj <= uppery; ++dj){ // // for (int dk = lowerz; dk <= upperz; ++dk){ // __syncthreads(); // // the shift value of a cell is pre-computed // ScalorType xshift, yshift, zshift; // // int nci = di + bidx; // // int ncj = dj + bidy; // // int nck = dk + bidz; // IndexType targetCellIdx = shiftedD3toD1 (clist, box, // nci, ncj, nck, // &xshift, &yshift, &zshift); // // load target index and coordinates form global memary // IndexType tmp = (targetIndexes[tid] = // getDeviceCellListData(clist, targetCellIdx, tid)); // if (tmp != MaxIndexValue){ // #ifdef COMPILE_NO_TEX // target[tid] = coord[tmp]; // targettype[tid] = type[tmp]; // #else // target[tid] = tex1Dfetch(global_texRef_interaction_coord, tmp); // targettype[tid] = tex1Dfetch(global_texRef_interaction_type, tmp); // #endif // } // __syncthreads(); // // find neighbor // if (ii != MaxIndexValue){ // for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){ // ScalorType diffx = target[jj].x - xshift - ref.x; // ScalorType diffy = target[jj].y - yshift - ref.y; // ScalorType diffz = target[jj].z - zshift - ref.z; // // if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx); // // if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy); // // if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz); // //printf ("%d\t%d\t%f\t%f\n", ii, // if ((diffx*diffx+diffy*diffy+diffz*diffz) < rlist2 && // targetIndexes[jj] != ii){ // IndexType fidx(0); // // fidx = AtomNBForceTable::calForceIndex ( // // nonBondedInteractionTable, // // const_numAtomType[0], // // reftype, // // targettype[jj]); // // if (fidx != mdForceNULL) { // ScalorType fx, fy, fz, dp; // nbForcePoten (nonBondedInteractionType[fidx], // &nonBondedInteractionParameter // [nonBondedInteractionParameterPosition[fidx]], // diffx, diffy, diffz, // &fx, &fy, &fz, &dp); // myPoten += dp; // myVxx += fx * diffx; // myVyy += fy * diffy; // myVzz += fz * diffz; // fsumx += fx; // fsumy += fy; // fsumz += fz; // // } // } // } // } // } // } // } // if (ii != MaxIndexValue){ // forcx[ii] += fsumx; // forcy[ii] += fsumy; // forcz[ii] += fsumz; // statistic_nb_buff0[ii] = myPoten * 0.5f; // statistic_nb_buff1[ii] = myVxx * 0.5f; // statistic_nb_buff2[ii] = myVyy * 0.5f; // statistic_nb_buff3[ii] = myVzz * 0.5f; // } // } __global__ void calNonBondedInteraction_cell (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const DeviceCellList clist, const ScalorType rcut, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType bidx, bidy, bidz; D1toD3 (clist.NCell, bid, bidx, bidy, bidz); // load index IndexType ii = getDeviceCellListData (clist, bid, tid); // load iith coordinate // use texturefetch instead CoordType ref; TypeType reftype; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); if (ii != MaxIndexValue){ #ifdef COMPILE_NO_TEX ref = coord[ii]; reftype = type[ii]; #else ref = tex1Dfetch (global_texRef_interaction_coord, ii); reftype = 
tex1Dfetch(global_texRef_interaction_type, ii); #endif } // the target index and coordinates are shared extern __shared__ volatile char pub_sbuff[]; volatile IndexType * targetIndexes = (volatile IndexType *) pub_sbuff; CoordType * target = (CoordType *) &targetIndexes[roundUp4(blockDim.x)]; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; ScalorType rcut2 = rcut * rcut; for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){ __syncthreads(); IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i); CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i); CoordType shift; shift.x = shiftNoi.x * box.size.x; shift.y = shiftNoi.y * box.size.y; shift.z = shiftNoi.z * box.size.z; targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid); if (targetIndexes[tid] != MaxIndexValue){ target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]); targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]); } bool oneCellX(false), oneCellY(false), oneCellZ(false); if (clist.NCell.x == 1) oneCellX = true; if (clist.NCell.y == 1) oneCellY = true; if (clist.NCell.z == 1) oneCellZ = true; __syncthreads(); // find neighbor if (ii != MaxIndexValue){ for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){ // if (targetIndexes[jj] == MaxIndexValue) break; ScalorType diffx = target[jj].x - shift.x - ref.x; ScalorType diffy = target[jj].y - shift.y - ref.y; ScalorType diffz = target[jj].z - shift.z - ref.z; if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx); if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy); if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz); //printf ("%d\t%d\t%f\t%f\n", ii, // ScalorType dr2; if (((diffx*diffx+diffy*diffy+diffz*diffz)) < rcut2 && targetIndexes[jj] != ii){ IndexType fidx(0); fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[jj]); // if (fidx != mdForceNULL) { ScalorType fx, fy, fz; nbForce (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, // dr2, &fx, &fy, &fz); fsumx += fx; fsumy += fy; fsumz += fz; // } } } } } if (ii != MaxIndexValue){ forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } } __global__ void calNonBondedInteraction_cell (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const DeviceCellList clist, const ScalorType rcut, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType bidx, bidy, bidz; D1toD3 (clist.NCell, bid, bidx, bidy, bidz); // load index IndexType ii = getDeviceCellListData (clist, bid, tid); // load iith coordinate // use texturefetch instead CoordType ref; TypeType reftype; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f); if (ii != MaxIndexValue){ #ifdef COMPILE_NO_TEX ref = coord[ii]; reftype = type[ii]; #else ref = tex1Dfetch (global_texRef_interaction_coord, ii); reftype = tex1Dfetch(global_texRef_interaction_type, ii); #endif } // the target index and coordinates are shared extern __shared__ volatile char pub_sbuff[]; volatile 
IndexType * targetIndexes = (volatile IndexType *) pub_sbuff; CoordType * target = (CoordType *) &targetIndexes[roundUp4(blockDim.x)]; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; ScalorType rcut2 = rcut * rcut; for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){ __syncthreads(); IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i); CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i); CoordType shift; shift.x = shiftNoi.x * box.size.x; shift.y = shiftNoi.y * box.size.y; shift.z = shiftNoi.z * box.size.z; targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid); if (targetIndexes[tid] != MaxIndexValue){ target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]); targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]); } bool oneCellX(false), oneCellY(false), oneCellZ(false); if (clist.NCell.x == 1) oneCellX = true; if (clist.NCell.y == 1) oneCellY = true; if (clist.NCell.z == 1) oneCellZ = true; __syncthreads(); // find neighbor if (ii != MaxIndexValue){ for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){ // if (targetIndexes[jj] == MaxIndexValue) break; ScalorType diffx = target[jj].x - shift.x - ref.x; ScalorType diffy = target[jj].y - shift.y - ref.y; ScalorType diffz = target[jj].z - shift.z - ref.z; if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx); if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy); if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz); //printf ("%d\t%d\t%f\t%f\n", ii, // ScalorType dr2; if (((diffx*diffx+diffy*diffy+diffz*diffz)) < rcut2 && targetIndexes[jj] != ii){ IndexType fidx(0); fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[jj]); // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; // } } } } } if (ii != MaxIndexValue){ forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; } } __global__ void calNonBondedInteraction (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, DeviceCellList clist, const ScalorType rcut, DeviceNeighborList nlist, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType bidx, bidy, bidz; D1toD3 (clist.NCell, bid, bidx, bidy, bidz); IndexType Nneighbor = 0; // load index IndexType ii = getDeviceCellListData (clist, bid, tid); // load iith coordinate // use texturefetch instead CoordType ref; TypeType reftype; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); if (ii != MaxIndexValue){ #ifdef COMPILE_NO_TEX ref = coord[ii]; reftype = type[ii]; #else ref = tex1Dfetch (global_texRef_interaction_coord, ii); reftype = tex1Dfetch(global_texRef_interaction_type, ii); #endif } // ScalorType rlist = clist.rlist; // the target index and coordinates are shared extern __shared__ volatile char pub_sbuff[]; volatile IndexType * targetIndexes = (volatile 
IndexType *) pub_sbuff; CoordType * target = (CoordType *) &targetIndexes[roundUp4(blockDim.x)]; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; ScalorType rlist2 = nlist.rlist * nlist.rlist; ScalorType rcut2 = rcut * rcut; bool oneCellX(false), oneCellY(false), oneCellZ(false); if (clist.NCell.x == 1) oneCellX = true; if (clist.NCell.y == 1) oneCellY = true; if (clist.NCell.z == 1) oneCellZ = true; for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){ __syncthreads(); IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i); CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i); CoordType shift; shift.x = shiftNoi.x * box.size.x; shift.y = shiftNoi.y * box.size.y; shift.z = shiftNoi.z * box.size.z; targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid); if (targetIndexes[tid] != MaxIndexValue){ target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]); targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]); } __syncthreads(); // find neighbor if (ii != MaxIndexValue){ for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){ // if (targetIndexes[jj] == MaxIndexValue) break; ScalorType diffx = target[jj].x - shift.x - ref.x; ScalorType diffy = target[jj].y - shift.y - ref.y; ScalorType diffz = target[jj].z - shift.z - ref.z; if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx); if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy); if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz); ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz); IndexType fidx(0); fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[jj]); if (dr2 < rcut2 && targetIndexes[jj] != ii){ // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); fsumx += fx; fsumy += fy; fsumz += fz; // } } if (dr2 < rlist2 && targetIndexes[jj] != ii){ IndexType listIdx = Nneighbor * nlist.stride + ii; nlist.data[listIdx] = targetIndexes[jj]; nlist.forceIndex[listIdx] = fidx; Nneighbor ++; } } } } if (ii != MaxIndexValue){ forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; if (Nneighbor > nlist.listLength && ptr_de != NULL){ *ptr_de = mdErrorShortNeighborList; return; } nlist.Nneighbor[ii] = Nneighbor; } } __global__ void calNonBondedInteraction (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, DeviceCellList clist, const ScalorType rcut, DeviceNeighborList nlist, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType bidx, bidy, bidz; D1toD3 (clist.NCell, bid, bidx, bidy, bidz); IndexType Nneighbor = 0; // load index IndexType ii = getDeviceCellListData (clist, bid, tid); // load iith coordinate // use texturefetch instead CoordType ref; TypeType reftype; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f); if (ii != MaxIndexValue){ #ifdef COMPILE_NO_TEX ref = coord[ii]; reftype = type[ii]; #else ref = tex1Dfetch 
(global_texRef_interaction_coord, ii); reftype = tex1Dfetch(global_texRef_interaction_type, ii); #endif } // ScalorType rlist = clist.rlist; // the target index and coordinates are shared extern __shared__ volatile char pub_sbuff[]; volatile IndexType * targetIndexes = (volatile IndexType *) pub_sbuff; CoordType * target = (CoordType *) &targetIndexes[roundUp4(blockDim.x)]; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; ScalorType rlist2 = nlist.rlist * nlist.rlist; ScalorType rcut2 = rcut * rcut; bool oneCellX(false), oneCellY(false), oneCellZ(false); if (clist.NCell.x == 1) oneCellX = true; if (clist.NCell.y == 1) oneCellY = true; if (clist.NCell.z == 1) oneCellZ = true; for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){ __syncthreads(); IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i); CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i); CoordType shift; shift.x = shiftNoi.x * box.size.x; shift.y = shiftNoi.y * box.size.y; shift.z = shiftNoi.z * box.size.z; targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid); if (targetIndexes[tid] != MaxIndexValue){ target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]); targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]); } __syncthreads(); // find neighbor if (ii != MaxIndexValue){ for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){ ScalorType diffx = target[jj].x - shift.x - ref.x; ScalorType diffy = target[jj].y - shift.y - ref.y; ScalorType diffz = target[jj].z - shift.z - ref.z; if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx); if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy); if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz); ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz); IndexType fidx(0); fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[jj]); if (dr2 < rcut2 && targetIndexes[jj] != ii){ // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; // } } if (dr2 < rlist2 && targetIndexes[jj] != ii){ IndexType listIdx = Nneighbor * nlist.stride + ii; nlist.data[listIdx] = targetIndexes[jj]; nlist.forceIndex[listIdx] = fidx; Nneighbor ++; } } } } if (ii != MaxIndexValue){ forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; if (Nneighbor > nlist.listLength && ptr_de != NULL){ *ptr_de = mdErrorShortNeighborList; return; } nlist.Nneighbor[ii] = Nneighbor; } } __global__ void calNonBondedInteraction_all (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const ScalorType rcut, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType numberAtom = numAtom; IndexType ii = tid + bid * blockDim.x; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); extern __shared__ volatile char pub_sbuff[]; volatile CoordType * target = (volatile 
CoordType *) pub_sbuff; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; __syncthreads(); CoordType ref; TypeType reftype; if (ii < numberAtom){ ref = coord[ii]; reftype = type[ii]; } ScalorType rcut2 = rcut * rcut; for (IndexType targetBlockId = 0; targetBlockId * blockDim.x < numberAtom; ++targetBlockId){ IndexType jj = tid + targetBlockId * blockDim.x; __syncthreads(); if (jj < numberAtom){ target[tid].x = coord[jj].x; target[tid].y = coord[jj].y; target[tid].z = coord[jj].z; targettype[tid] = type[jj]; } __syncthreads(); if (ii < numberAtom){ for (IndexType kk = 0; kk < blockDim.x; ++kk){ if (kk + targetBlockId * blockDim.x >= numberAtom) break; ScalorType diffx = target[kk].x - ref.x; ScalorType diffy = target[kk].y - ref.y; ScalorType diffz = target[kk].z - ref.z; shortestImage (box, &diffx, &diffy, &diffz); ScalorType dr2; if ((dr2 = diffx*diffx+diffy*diffy+diffz*diffz) < rcut2 && kk + targetBlockId * blockDim.x != ii){ IndexType fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[kk]); if (dr2 < rcut2 ) { // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); fsumx += fx; fsumy += fy; fsumz += fz; // } } } } } } if (ii < numberAtom){ forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; } } __global__ void calNonBondedInteraction_all (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const ScalorType rcut, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType numberAtom = numAtom; IndexType ii = tid + bid * blockDim.x; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f); extern __shared__ volatile char pub_sbuff[]; volatile CoordType * target = (volatile CoordType *) pub_sbuff; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; __syncthreads(); CoordType ref; TypeType reftype; if (ii < numberAtom){ ref = coord[ii]; reftype = type[ii]; } ScalorType rcut2 = rcut * rcut; for (IndexType targetBlockId = 0; targetBlockId * blockDim.x < numberAtom; ++targetBlockId){ IndexType jj = tid + targetBlockId * blockDim.x; __syncthreads(); if (jj < numberAtom){ target[tid].x = coord[jj].x; target[tid].y = coord[jj].y; target[tid].z = coord[jj].z; targettype[tid] = type[jj]; } __syncthreads(); if (ii < numberAtom){ for (IndexType kk = 0; kk < blockDim.x; ++kk){ if (kk + targetBlockId * blockDim.x >= numberAtom) break; ScalorType diffx = target[kk].x - ref.x; ScalorType diffy = target[kk].y - ref.y; ScalorType diffz = target[kk].z - ref.z; shortestImage (box, &diffx, &diffy, &diffz); ScalorType dr2; if ((dr2 = diffx*diffx+diffy*diffy+diffz*diffz) < rcut2 && kk + targetBlockId * blockDim.x != ii){ IndexType fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[kk]); if (dr2 < rcut2 ) { // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter 
[nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; // } } } } } } if (ii < numberAtom){ forcx[ii] += fsumx; forcy[ii] += fsumy; forcz[ii] += fsumz; statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; } } __global__ void calTwinRangeCorrection_cell (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const DeviceCellList clist, const ScalorType rcut1, const ScalorType rcut2, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType bidx, bidy, bidz; D1toD3 (clist.NCell, bid, bidx, bidy, bidz); // load index IndexType ii = getDeviceCellListData (clist, bid, tid); // load iith coordinate // use texturefetch instead CoordType ref; TypeType reftype; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f); if (ii != MaxIndexValue){ #ifdef COMPILE_NO_TEX ref = coord[ii]; reftype = type[ii]; #else ref = tex1Dfetch (global_texRef_interaction_coord, ii); reftype = tex1Dfetch(global_texRef_interaction_type, ii); #endif } // the target index and coordinates are shared extern __shared__ volatile char pub_sbuff[]; volatile IndexType * targetIndexes = (volatile IndexType *) pub_sbuff; CoordType * target = (CoordType *) &targetIndexes[roundUp4(blockDim.x)]; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; ScalorType rcut12 = rcut1 * rcut1; ScalorType rcut22 = rcut2 * rcut2; for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){ __syncthreads(); IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i); CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i); CoordType shift; shift.x = shiftNoi.x * box.size.x; shift.y = shiftNoi.y * box.size.y; shift.z = shiftNoi.z * box.size.z; targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid); if (targetIndexes[tid] != MaxIndexValue){ target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]); targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]); } bool oneCellX(false), oneCellY(false), oneCellZ(false); if (clist.NCell.x == 1) oneCellX = true; if (clist.NCell.y == 1) oneCellY = true; if (clist.NCell.z == 1) oneCellZ = true; __syncthreads(); // find neighbor if (ii != MaxIndexValue){ for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){ // if (targetIndexes[jj] == MaxIndexValue) break; ScalorType diffx = target[jj].x - shift.x - ref.x; ScalorType diffy = target[jj].y - shift.y - ref.y; ScalorType diffz = target[jj].z - shift.z - ref.z; if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx); if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy); if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz); ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz); if (dr2 < rcut22 && dr2 >= rcut12 && targetIndexes[jj] != ii){ IndexType fidx(0); fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[jj]); // 
if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); // printf ("# %d\t%d\t%f\t%f\t%f\n", // ii, targetIndexes[jj], // ref.z, target[jj].z, fz); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; // } } } } } if (ii != MaxIndexValue){ forcx[ii] = fsumx; forcy[ii] = fsumy; forcz[ii] = fsumz; statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; } } __global__ void calTwinRangeCorrection_all (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const ScalorType rcut1, const ScalorType rcut2, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType numberAtom = numAtom; IndexType ii = tid + bid * blockDim.x; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f); extern __shared__ volatile char pub_sbuff[]; volatile CoordType * target = (volatile CoordType *) pub_sbuff; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; __syncthreads(); CoordType ref; TypeType reftype; if (ii < numberAtom){ ref = coord[ii]; reftype = type[ii]; } ScalorType rcut12 = rcut1 * rcut1; ScalorType rcut22 = rcut2 * rcut2; for (IndexType targetBlockId = 0; targetBlockId * blockDim.x < numberAtom; ++targetBlockId){ IndexType jj = tid + targetBlockId * blockDim.x; __syncthreads(); if (jj < numberAtom){ target[tid].x = coord[jj].x; target[tid].y = coord[jj].y; target[tid].z = coord[jj].z; targettype[tid] = type[jj]; } __syncthreads(); if (ii < numberAtom){ for (IndexType kk = 0; kk < blockDim.x; ++kk){ if (kk + targetBlockId * blockDim.x >= numberAtom) break; ScalorType diffx = target[kk].x - ref.x; ScalorType diffy = target[kk].y - ref.y; ScalorType diffz = target[kk].z - ref.z; shortestImage (box, &diffx, &diffy, &diffz); ScalorType dr2 = diffx*diffx+diffy*diffy+diffz*diffz; if (dr2 < rcut22 && dr2 >= rcut12 && kk + targetBlockId * blockDim.x != ii){ IndexType fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[kk]); // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; // } } } } } if (ii < numberAtom){ forcx[ii] = fsumx; forcy[ii] = fsumy; forcz[ii] = fsumz; statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; } } __global__ void buildNeighborListCalTwinRangeCorr_cell (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const DeviceCellList clist, const ScalorType rcut1, const ScalorType rcut2, 
DeviceNeighborList nlist, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType bidx, bidy, bidz; D1toD3 (clist.NCell, bid, bidx, bidy, bidz); // set number of neighbor to 0 IndexType Nneighbor = 0; // load index IndexType ii = getDeviceCellListData (clist, bid, tid); // load iith coordinate // use texturefetch instead CoordType ref; TypeType reftype; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f); if (ii != MaxIndexValue){ #ifdef COMPILE_NO_TEX ref = coord[ii]; reftype = type[ii]; #else ref = tex1Dfetch (global_texRef_interaction_coord, ii); reftype = tex1Dfetch(global_texRef_interaction_type, ii); #endif } // the target index and coordinates are shared extern __shared__ volatile char pub_sbuff[]; volatile IndexType * targetIndexes = (volatile IndexType *) pub_sbuff; CoordType * target = (CoordType *) &targetIndexes[roundUp4(blockDim.x)]; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; ScalorType rcut12 = rcut1 * rcut1; ScalorType rcut22 = rcut2 * rcut2; for (IndexType i = 0; i < clist.numNeighborCell[bid]; ++i){ __syncthreads(); IndexType targetCellIdx = getNeighborCellIndex (clist, bid, i); CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, bid, i); CoordType shift; shift.x = shiftNoi.x * box.size.x; shift.y = shiftNoi.y * box.size.y; shift.z = shiftNoi.z * box.size.z; targetIndexes[tid] = getDeviceCellListData(clist, targetCellIdx, tid); if (targetIndexes[tid] != MaxIndexValue){ target[tid] = tex1Dfetch(global_texRef_interaction_coord, targetIndexes[tid]); targettype[tid] = tex1Dfetch(global_texRef_interaction_type, targetIndexes[tid]); } bool oneCellX(false), oneCellY(false), oneCellZ(false); if (clist.NCell.x == 1) oneCellX = true; if (clist.NCell.y == 1) oneCellY = true; if (clist.NCell.z == 1) oneCellZ = true; __syncthreads(); // find neighbor if (ii != MaxIndexValue){ for (IndexType jj = 0; jj < clist.numbers[targetCellIdx]; ++jj){ // if (targetIndexes[jj] == MaxIndexValue) break; ScalorType diffx = target[jj].x - shift.x - ref.x; ScalorType diffy = target[jj].y - shift.y - ref.y; ScalorType diffz = target[jj].z - shift.z - ref.z; if (oneCellX) shortestImage (box.size.x, box.sizei.x, &diffx); if (oneCellY) shortestImage (box.size.y, box.sizei.y, &diffy); if (oneCellZ) shortestImage (box.size.z, box.sizei.z, &diffz); ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz); if (targetIndexes[jj] != ii){ if (dr2 < rcut22 && dr2 >= rcut12 ){ IndexType fidx(0); fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[jj]); // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); // printf ("# %d\t%d\t%f\t%f\t%f\n", // ii, targetIndexes[jj], // ref.z, target[jj].z, fz); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; // } } else if (dr2 < rcut12){ IndexType fidx(0); fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[jj]); IndexType listIdx = Nneighbor * nlist.stride + ii; 
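// dr2 < rcut1*rcut1 in this branch: the pair lies inside the inner cutoff, so it is
// stored in the neighbor list together with its force-table index for the per-step
// short-range evaluation; pairs in the [rcut1, rcut2) shell were consumed above as
// the twin-range correction (force, potential and virial accumulated directly).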
nlist.data[listIdx] = targetIndexes[jj]; nlist.forceIndex[listIdx] = fidx; Nneighbor ++; } } } } } if (ii != MaxIndexValue){ forcx[ii] = fsumx; forcy[ii] = fsumy; forcz[ii] = fsumz; statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; if (Nneighbor > nlist.listLength && ptr_de != NULL){ *ptr_de = mdErrorShortNeighborList; } nlist.Nneighbor[ii] = Nneighbor; } } __global__ void buildNeighborListCalTwinRangeCorr_all (const IndexType numAtom, const CoordType * coord, ScalorType * forcx, ScalorType * forcy, ScalorType * forcz, const TypeType * type, const RectangularBox box, const ScalorType rcut1, const ScalorType rcut2, DeviceNeighborList nlist, ScalorType * statistic_nb_buff0, ScalorType * statistic_nb_buff1, ScalorType * statistic_nb_buff2, ScalorType * statistic_nb_buff3, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; IndexType numberAtom = numAtom; IndexType Nneighbor = 0; IndexType ii = tid + bid * blockDim.x; ScalorType fsumx (0.f), fsumy(0.f), fsumz(0.f); ScalorType myPoten (0.0f), myVxx (0.0f), myVyy (0.0f), myVzz (0.0f); extern __shared__ volatile char pub_sbuff[]; volatile CoordType * target = (volatile CoordType *) pub_sbuff; volatile TypeType * targettype = (volatile TypeType *) &target[roundUp4(blockDim.x)]; __syncthreads(); CoordType ref; TypeType reftype; if (ii < numberAtom){ ref = coord[ii]; reftype = type[ii]; } ScalorType rcut12 = rcut1 * rcut1; ScalorType rcut22 = rcut2 * rcut2; for (IndexType targetBlockId = 0; targetBlockId * blockDim.x < numberAtom; ++targetBlockId){ IndexType jj = tid + targetBlockId * blockDim.x; __syncthreads(); if (jj < numberAtom){ target[tid].x = coord[jj].x; target[tid].y = coord[jj].y; target[tid].z = coord[jj].z; targettype[tid] = type[jj]; } __syncthreads(); if (ii < numberAtom){ for (IndexType kk = 0; kk < blockDim.x; ++kk){ if (kk + targetBlockId * blockDim.x >= numberAtom) break; ScalorType diffx = target[kk].x - ref.x; ScalorType diffy = target[kk].y - ref.y; ScalorType diffz = target[kk].z - ref.z; shortestImage (box, &diffx, &diffy, &diffz); ScalorType dr2 = diffx*diffx+diffy*diffy+diffz*diffz; if (kk + targetBlockId * blockDim.x != ii){ if (dr2 < rcut22 && dr2 >= rcut12 ){ IndexType fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[kk]); // if (fidx != mdForceNULL) { ScalorType fx, fy, fz, dp; nbForcePoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &fx, &fy, &fz, &dp); myPoten += dp; myVxx += fx * diffx; myVyy += fy * diffy; myVzz += fz * diffz; fsumx += fx; fsumy += fy; fsumz += fz; // } } else if (dr2 < rcut12){ IndexType fidx = AtomNBForceTable::calForceIndex ( const_nonBondedInteractionTable, const_numAtomType[0], reftype, targettype[kk]); IndexType listIdx = Nneighbor * nlist.stride + ii; nlist.data[listIdx] = kk + targetBlockId * blockDim.x; nlist.forceIndex[listIdx] = fidx; Nneighbor ++; } } } } } if (ii < numberAtom){ forcx[ii] = fsumx; forcy[ii] = fsumy; forcz[ii] = fsumz; statistic_nb_buff0[ii] = myPoten * 0.5f; statistic_nb_buff1[ii] = myVxx * 0.5f; statistic_nb_buff2[ii] = myVyy * 0.5f; statistic_nb_buff3[ii] = myVzz * 0.5f; if (Nneighbor > nlist.listLength && ptr_de != NULL){ *ptr_de = mdErrorShortNeighborList; } nlist.Nneighbor[ii] = Nneighbor; } } 
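// A minimal host-side launch sketch (not present in the original source) for the
// cell-list kernels above: one thread block per cell, blockDim.x threads per cell
// slot, and dynamic shared memory sized for the targetIndexes / target / targettype
// staging arrays carved out of pub_sbuff.  The helper and member names (toGridDim,
// myBlockDim, sys.ddata, err.ptr_de) follow the commented-out wrappers earlier in
// this file; the force-array and statistic-buffer names and the exact shared-memory
// sizing are assumptions to be checked against the CellList implementation.
//
// IndexType nob = clist.NCell.x * clist.NCell.y * clist.NCell.z;
// size_t sbuffSize =
//     roundUp4(myBlockDim.x) * sizeof(IndexType) +   // targetIndexes
//     roundUp4(myBlockDim.x) * sizeof(CoordType) +   // target coordinates
//     roundUp4(myBlockDim.x) * sizeof(TypeType);     // target types
// buildNeighborListCalTwinRangeCorr_cell
//     <<<toGridDim(nob), myBlockDim.x, sbuffSize>>> (
//         sys.ddata.numAtom, sys.ddata.coord,
//         sys.ddata.forcx, sys.ddata.forcy, sys.ddata.forcz,
//         sys.ddata.type, sys.box, clist, rcut1, rcut2, nlist,
//         statistic_nb_buff0, statistic_nb_buff1,
//         statistic_nb_buff2, statistic_nb_buff3,
//         err.ptr_de);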
__global__ void widomDeltaPoten_NVT (const IndexType numTestParticle, const CoordType * coordTestParticle, const TypeType * typeTestParticle, const IndexType numAtom, const CoordType * coord, const TypeType * type, const RectangularBox box, DeviceCellList clist, ScalorType * statistic_nb_buff0, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; // IndexType ii = tid + bid * blockDim.x; if (bid >= numTestParticle) return; // extern __shared__ volatile char pub_sbuff_widom[]; // volatile ScalorType * sumbuff = (volatile ScalorType *) pub_sbuff_widom; extern __shared__ volatile ScalorType sumbuff []; CoordType refCoord = coordTestParticle[bid]; TypeType refType = typeTestParticle[bid]; ScalorType myPoten (0.0f); IndexType refCelli, refCellj, refCellk; refCelli = IndexType (refCoord.x * box.sizei.x * ScalorType(clist.NCell.x)); refCellj = IndexType (refCoord.y * box.sizei.y * ScalorType(clist.NCell.y)); refCellk = IndexType (refCoord.z * box.sizei.z * ScalorType(clist.NCell.z)); if (refCelli == clist.NCell.x){ refCelli -= clist.NCell.x; refCoord.x -= box.size.x; } if (refCellj == clist.NCell.y){ refCellj -= clist.NCell.y; refCoord.y -= box.size.y; } if (refCellk == clist.NCell.z){ refCellk -= clist.NCell.z; refCoord.z -= box.size.z; } IndexType refCellIndex = D3toD1 (clist.NCell, refCelli, refCellj, refCellk); for (IndexType i = 0; i < clist.numNeighborCell[refCellIndex]; ++i){ __syncthreads (); IndexType targetCellIdx = getNeighborCellIndex (clist, refCellIndex, i); CoordNoiType shiftNoi = getNeighborCellShiftNoi (clist, refCellIndex, i); CoordType shift; shift.x = shiftNoi.x * box.size.x; shift.y = shiftNoi.y * box.size.y; shift.z = shiftNoi.z * box.size.z; IndexType targetIndex = getDeviceCellListData(clist, targetCellIdx, tid); if (targetIndex != MaxIndexValue){ TypeType targettype = tex1Dfetch(global_texRef_interaction_type, targetIndex); if (refType == targettype){ CoordType targetCoord = tex1Dfetch(global_texRef_interaction_coord, targetIndex); ScalorType diffx = targetCoord.x - shift.x - refCoord.x; ScalorType diffy = targetCoord.y - shift.y - refCoord.y; ScalorType diffz = targetCoord.z - shift.z - refCoord.z; ScalorType dr2 = ((diffx*diffx+diffy*diffy+diffz*diffz)); if (dr2 < clist.rlist*clist.rlist && dr2 > 1e-4){ IndexType fidx(0); ScalorType dp; fidx = AtomNBForceTable:: calForceIndex (const_nonBondedInteractionTable, const_numAtomType[0], refType, refType); nbPoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &dp); myPoten += dp; // printf ("dp: %f, %f %f %f\n", dp, diffx, diffy, diffz); } } } } sumbuff[tid] = myPoten; __syncthreads(); sumVectorBlockBuffer_2 (sumbuff); __syncthreads(); if (tid == 0){ statistic_nb_buff0[bid] = sumbuff[0]; } } // if (tid == 0){ // // printf ("### du is %f\n", sumbuff[0]); // statistic_nb_buff0[bid] = expf(- (sumbuff[0] + energyCorrection) / temperature); // } // } __global__ void widomDeltaPoten_allPair_NVT (const IndexType numTestParticle, const CoordType * coordTestParticle, const TypeType * typeTestParticle, const IndexType numAtom, const CoordType * coord, const TypeType * type, const RectangularBox box, const ScalorType rlist, ScalorType * statistic_nb_buff0, mdError_t * ptr_de) { // RectangularBoxGeometry::normalizeSystem (box, &ddata); IndexType bid = blockIdx.x + gridDim.x * blockIdx.y; IndexType tid = threadIdx.x; // IndexType ii = tid + bid * 
blockDim.x; if (bid >= numTestParticle) return; CoordType refCoord = coordTestParticle[bid]; TypeType refType = typeTestParticle[bid]; ScalorType myPoten = 0.; extern __shared__ volatile ScalorType sumbuff []; for (IndexType start = 0; start < numAtom; start += blockDim.x){ IndexType targetIndex = start + tid; if (targetIndex >= numAtom) break; TypeType targetType = type[targetIndex]; if (targetType != refType) continue; CoordType targetCoord = coord[targetIndex]; ScalorType diffx = targetCoord.x - refCoord.x; ScalorType diffy = targetCoord.y - refCoord.y; ScalorType diffz = targetCoord.z - refCoord.z; RectangularBoxGeometry::shortestImage (box, &diffx, &diffy, &diffz); ScalorType dr2 = (diffx*diffx+diffy*diffy+diffz*diffz); if (dr2 < rlist * rlist && dr2 > 1e-4 ){ IndexType fidx(0); ScalorType dp; fidx = AtomNBForceTable:: calForceIndex (const_nonBondedInteractionTable, const_numAtomType[0], refType, refType); nbPoten (nonBondedInteractionType[fidx], &nonBondedInteractionParameter [nonBondedInteractionParameterPosition[fidx]], diffx, diffy, diffz, &dp); myPoten += dp; } } sumbuff[tid] = myPoten; __syncthreads(); sumVectorBlockBuffer_2 (sumbuff); __syncthreads(); if (tid == 0){ statistic_nb_buff0[bid] = sumbuff[0]; } } // if (tid == 0){ // // printf ("### du is %f\n", sumbuff[0]); // statistic_nb_buff0[bid] = expf(- (sumbuff[0] + energyCorrection) / temperature); // } // }
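The commented-out lines in both Widom kernels above show that statistic_nb_buff0 is meant to feed a Boltzmann factor expf(- (sumbuff[0] + energyCorrection) / temperature) per test particle. As a hedged illustration only, a minimal host-side sketch of that post-processing is given below; the function name and the h_du vector are hypothetical, while temperature and energyCorrection are taken from the commented code, and kB is assumed to be absorbed into the reduced temperature.

#include <cmath>
#include <vector>

// Averages the Boltzmann factors of the per-particle insertion energies
// (copied back from statistic_nb_buff0) and returns the excess chemical potential.
double widomExcessChemicalPotential (const std::vector<double> & h_du,
                                     const double temperature,
                                     const double energyCorrection)
{
  double acc = 0.0;
  for (const double du : h_du) {
    acc += std::exp (-(du + energyCorrection) / temperature);  // exp(-(dU + U_corr) / kT)
  }
  acc /= double (h_du.size());
  return -temperature * std::log (acc);                        // mu_ex = -kT ln < exp(-dU/kT) >
}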
69fe0c7792a07fbfa80b896232cad7bac68d21f0.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" #include <iostream> #include <fstream> #include <vector> #include <cmath> #define HANDLE_ERROR(err) \ do { if (err != hipSuccess) { printf("ERROR: %s\n", hipGetErrorString(err)); exit(0);} } while (0) __constant__ double AVG[32][3]; __constant__ double COV[32][3][3]; __constant__ double COV_INV[32][3][3]; __constant__ double DETS[32]; __device__ double func(uchar4 p, int i) { double res = 0.0, p_avg[3], tmp[3]; for (int j = 0; j < 3; ++j) { p_avg[j] = 0.0; tmp[j] = 0.0; } p_avg[0] = p.x - AVG[i][0]; p_avg[1] = p.y - AVG[i][1]; p_avg[2] = p.z - AVG[i][2]; for (int j = 0; j < 3; ++j) { for (int k = 0; k < 3; ++k) { tmp[j] += -p_avg[k] * COV_INV[i][k][j]; } res += tmp[j] * p_avg[j]; } res -= ::log(std::abs(DETS[i])); return res; } __device__ int getClass(uchar4 pixel, int nc) { double res[32]; for (int i = 0; i < nc; ++i) { res[i] = func(pixel, i); } double maxEl = res[0]; int idx = 0; for (int i = 0; i < nc; ++i) { if (res[i] > maxEl) { maxEl = res[i]; idx = i; } } return idx; } __global__ void kernel(uchar4 *dst, int w, int h, int nc) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int idy = blockDim.y * blockIdx.y + threadIdx.y; int offsetx = blockDim.x * gridDim.x; int offsety = blockDim.y * gridDim.y; for (int x = idx; x < w; x += offsetx) { for (int y = idy; y < h; y += offsety) { dst[x + y * w].w = getClass(dst[x + y * w], nc); } } } int main() { std::string input, output; int w, h, nc, np; uchar4 *data; std::cin >> input >> output >> nc; std::vector<std::vector<int2>> classes(nc); for (int i = 0; i < nc; ++i) { std::cin >> np; classes[i].resize(np); for (int j = 0; j < np; ++j) { std::cin >> classes[i][j].x >> classes[i][j].y; } } std::ifstream fsIn(input, std::ios::in | std::ios::binary); if (fsIn.is_open()) { fsIn.read((char *)&w, sizeof(w)); fsIn.read((char *)&h, sizeof(h)); data = new uchar4[w * h]; fsIn.read((char *)data, w * h * sizeof(data[0])); fsIn.close(); } else { return 1; } // double avg[32][3]; for (int i = 0; i < 32; ++i) { for (int j = 0; j < 3; ++j) { avg[i][j] = 0.0; } } for (int i = 0; i < nc; ++i) { int np = classes[i].size(); for (int j = 0; j < np; ++j) { int x = classes[i][j].x; int y = classes[i][j].y; uchar4 curPixel = data[x + y * w]; avg[i][0] += curPixel.x; avg[i][1] += curPixel.y; avg[i][2] += curPixel.z; } for (int k = 0; k < 3; ++k) { avg[i][k] /= np; } } double cov[32][3][3]; for (int i = 0; i < 32; ++i) { for (int j = 0; j < 3; ++j) { for (int k = 0; k < 3; ++k) { cov[i][j][k] = 0.0; } } } for (int i = 0; i < nc; ++i) { np = classes[i].size(); for (int j = 0; j < np; ++j) { double tmp[3]; int x = classes[i][j].x; int y = classes[i][j].y; uchar4 curPixel = data[x + y * w]; tmp[0] = curPixel.x - avg[i][0]; tmp[1] = curPixel.y - avg[i][1]; tmp[2] = curPixel.z - avg[i][2]; for (int k = 0; k < 3; ++k) { for (int l = 0; l < 3; ++l) { cov[i][k][l] += tmp[k] * tmp[l]; } } } for (int k = 0; k < 3; ++k) { for (int l = 0; l < 3; ++l) { cov[i][k][l] /= np - 1; } } } double cov_inv[32][3][3]; for (int i = 0; i < 32; ++i) { for (int j = 0; j < 3; ++j) { for (int k = 0; k < 3; ++k) { cov_inv[i][j][k] = 0.0; } } } double dets[32]; for (int i = 0; i < nc; ++i) { double det = 0; for (int j = 0; j < 3; ++j) { det += cov[i][0][j] * (cov[i][1][(j + 1) % 3] * cov[i][2][(j + 2) % 3] - cov[i][1][(j + 2) % 3] * cov[i][2][(j + 1) % 3]); } dets[i] = det; } // for (int i = 0; i < nc; ++i) { cov_inv[i][0][0] = (cov[i][1][1] * cov[i][2][2] - cov[i][2][1] * cov[i][1][2]) / dets[i]; 
cov_inv[i][0][1] = (cov[i][0][2] * cov[i][2][1] - cov[i][0][1] * cov[i][2][2]) / dets[i]; cov_inv[i][0][2] = (cov[i][0][1] * cov[i][1][2] - cov[i][0][2] * cov[i][1][1]) / dets[i]; cov_inv[i][1][0] = (cov[i][1][2] * cov[i][2][0] - cov[i][1][0] * cov[i][2][2]) / dets[i]; cov_inv[i][1][1] = (cov[i][0][0] * cov[i][2][2] - cov[i][0][2] * cov[i][2][0]) / dets[i]; cov_inv[i][1][2] = (cov[i][1][0] * cov[i][0][2] - cov[i][0][0] * cov[i][1][2]) / dets[i]; cov_inv[i][2][0] = (cov[i][1][0] * cov[i][2][1] - cov[i][2][0] * cov[i][1][1]) / dets[i]; cov_inv[i][2][1] = (cov[i][2][0] * cov[i][0][1] - cov[i][0][0] * cov[i][2][1]) / dets[i]; cov_inv[i][2][2] = (cov[i][0][0] * cov[i][1][1] - cov[i][1][0] * cov[i][0][1]) / dets[i]; } HANDLE_ERROR(hipMemcpyToSymbol(AVG, avg, sizeof(double) * 32 * 3)); HANDLE_ERROR(hipMemcpyToSymbol(COV, cov, sizeof(double) * 32 * 3 * 3)); HANDLE_ERROR(hipMemcpyToSymbol(COV_INV, cov_inv, sizeof(double) * 32 * 3 * 3)); HANDLE_ERROR(hipMemcpyToSymbol(DETS, dets, sizeof(double) * 32)); uchar4 *dev_data; HANDLE_ERROR(hipMalloc(&dev_data, sizeof(uchar4) * h * w)); HANDLE_ERROR(hipMemcpy(dev_data, data, sizeof(uchar4) * h * w, hipMemcpyHostToDevice)); hipLaunchKernelGGL(( kernel), dim3(dim3(16, 16)), dim3(dim3(16, 16)), 0, 0, dev_data, w, h, nc); HANDLE_ERROR(hipMemcpy(data, dev_data, sizeof(uchar4) * h * w, hipMemcpyDeviceToHost)); std::ofstream fsOut(output, std::ios::out | std::ios::binary); if (fsOut.is_open()) { fsOut.write((char *)&w, sizeof(w)); fsOut.write((char *)&h, sizeof(h)); fsOut.write((char *)data, w * h * sizeof(data[0])); fsOut.close(); } else { return 1; } HANDLE_ERROR(hipFree(dev_data)); delete[] data; return 0; }
69fe0c7792a07fbfa80b896232cad7bac68d21f0.cu
#include <iostream> #include <fstream> #include <vector> #include <cmath> #define HANDLE_ERROR(err) \ do { if (err != cudaSuccess) { printf("ERROR: %s\n", cudaGetErrorString(err)); exit(0);} } while (0) __constant__ double AVG[32][3]; __constant__ double COV[32][3][3]; __constant__ double COV_INV[32][3][3]; __constant__ double DETS[32]; __device__ double func(uchar4 p, int i) { double res = 0.0, p_avg[3], tmp[3]; for (int j = 0; j < 3; ++j) { p_avg[j] = 0.0; tmp[j] = 0.0; } p_avg[0] = p.x - AVG[i][0]; p_avg[1] = p.y - AVG[i][1]; p_avg[2] = p.z - AVG[i][2]; for (int j = 0; j < 3; ++j) { for (int k = 0; k < 3; ++k) { tmp[j] += -p_avg[k] * COV_INV[i][k][j]; } res += tmp[j] * p_avg[j]; } res -= std::log(std::abs(DETS[i])); return res; } __device__ int getClass(uchar4 pixel, int nc) { double res[32]; for (int i = 0; i < nc; ++i) { res[i] = func(pixel, i); } double maxEl = res[0]; int idx = 0; for (int i = 0; i < nc; ++i) { if (res[i] > maxEl) { maxEl = res[i]; idx = i; } } return idx; } __global__ void kernel(uchar4 *dst, int w, int h, int nc) { int idx = blockDim.x * blockIdx.x + threadIdx.x; int idy = blockDim.y * blockIdx.y + threadIdx.y; int offsetx = blockDim.x * gridDim.x; int offsety = blockDim.y * gridDim.y; for (int x = idx; x < w; x += offsetx) { for (int y = idy; y < h; y += offsety) { dst[x + y * w].w = getClass(dst[x + y * w], nc); } } } int main() { std::string input, output; int w, h, nc, np; uchar4 *data; std::cin >> input >> output >> nc; std::vector<std::vector<int2>> classes(nc); for (int i = 0; i < nc; ++i) { std::cin >> np; classes[i].resize(np); for (int j = 0; j < np; ++j) { std::cin >> classes[i][j].x >> classes[i][j].y; } } std::ifstream fsIn(input, std::ios::in | std::ios::binary); if (fsIn.is_open()) { fsIn.read((char *)&w, sizeof(w)); fsIn.read((char *)&h, sizeof(h)); data = new uchar4[w * h]; fsIn.read((char *)data, w * h * sizeof(data[0])); fsIn.close(); } else { return 1; } // this is where the interesting part begins double avg[32][3]; for (int i = 0; i < 32; ++i) { for (int j = 0; j < 3; ++j) { avg[i][j] = 0.0; } } for (int i = 0; i < nc; ++i) { int np = classes[i].size(); for (int j = 0; j < np; ++j) { int x = classes[i][j].x; int y = classes[i][j].y; uchar4 curPixel = data[x + y * w]; avg[i][0] += curPixel.x; avg[i][1] += curPixel.y; avg[i][2] += curPixel.z; } for (int k = 0; k < 3; ++k) { avg[i][k] /= np; } } double cov[32][3][3]; for (int i = 0; i < 32; ++i) { for (int j = 0; j < 3; ++j) { for (int k = 0; k < 3; ++k) { cov[i][j][k] = 0.0; } } } for (int i = 0; i < nc; ++i) { np = classes[i].size(); for (int j = 0; j < np; ++j) { double tmp[3]; int x = classes[i][j].x; int y = classes[i][j].y; uchar4 curPixel = data[x + y * w]; tmp[0] = curPixel.x - avg[i][0]; tmp[1] = curPixel.y - avg[i][1]; tmp[2] = curPixel.z - avg[i][2]; for (int k = 0; k < 3; ++k) { for (int l = 0; l < 3; ++l) { cov[i][k][l] += tmp[k] * tmp[l]; } } } for (int k = 0; k < 3; ++k) { for (int l = 0; l < 3; ++l) { cov[i][k][l] /= np - 1; } } } double cov_inv[32][3][3]; for (int i = 0; i < 32; ++i) { for (int j = 0; j < 3; ++j) { for (int k = 0; k < 3; ++k) { cov_inv[i][j][k] = 0.0; } } } double dets[32]; for (int i = 0; i < nc; ++i) { double det = 0; for (int j = 0; j < 3; ++j) { det += cov[i][0][j] * (cov[i][1][(j + 1) % 3] * cov[i][2][(j + 2) % 3] - cov[i][1][(j + 2) % 3] * cov[i][2][(j + 1) % 3]); } dets[i] = det; } // sorry for (int i = 0; i < nc; ++i) { cov_inv[i][0][0] = (cov[i][1][1] * cov[i][2][2] - cov[i][2][1] * cov[i][1][2]) / dets[i]; cov_inv[i][0][1] = (cov[i][0][2] * cov[i][2][1] - 
cov[i][0][1] * cov[i][2][2]) / dets[i]; cov_inv[i][0][2] = (cov[i][0][1] * cov[i][1][2] - cov[i][0][2] * cov[i][1][1]) / dets[i]; cov_inv[i][1][0] = (cov[i][1][2] * cov[i][2][0] - cov[i][1][0] * cov[i][2][2]) / dets[i]; cov_inv[i][1][1] = (cov[i][0][0] * cov[i][2][2] - cov[i][0][2] * cov[i][2][0]) / dets[i]; cov_inv[i][1][2] = (cov[i][1][0] * cov[i][0][2] - cov[i][0][0] * cov[i][1][2]) / dets[i]; cov_inv[i][2][0] = (cov[i][1][0] * cov[i][2][1] - cov[i][2][0] * cov[i][1][1]) / dets[i]; cov_inv[i][2][1] = (cov[i][2][0] * cov[i][0][1] - cov[i][0][0] * cov[i][2][1]) / dets[i]; cov_inv[i][2][2] = (cov[i][0][0] * cov[i][1][1] - cov[i][1][0] * cov[i][0][1]) / dets[i]; } HANDLE_ERROR(cudaMemcpyToSymbol(AVG, avg, sizeof(double) * 32 * 3)); HANDLE_ERROR(cudaMemcpyToSymbol(COV, cov, sizeof(double) * 32 * 3 * 3)); HANDLE_ERROR(cudaMemcpyToSymbol(COV_INV, cov_inv, sizeof(double) * 32 * 3 * 3)); HANDLE_ERROR(cudaMemcpyToSymbol(DETS, dets, sizeof(double) * 32)); uchar4 *dev_data; HANDLE_ERROR(cudaMalloc(&dev_data, sizeof(uchar4) * h * w)); HANDLE_ERROR(cudaMemcpy(dev_data, data, sizeof(uchar4) * h * w, cudaMemcpyHostToDevice)); kernel<<<dim3(16, 16), dim3(16, 16)>>>(dev_data, w, h, nc); HANDLE_ERROR(cudaMemcpy(data, dev_data, sizeof(uchar4) * h * w, cudaMemcpyDeviceToHost)); std::ofstream fsOut(output, std::ios::out | std::ios::binary); if (fsOut.is_open()) { fsOut.write((char *)&w, sizeof(w)); fsOut.write((char *)&h, sizeof(h)); fsOut.write((char *)data, w * h * sizeof(data[0])); fsOut.close(); } else { return 1; } HANDLE_ERROR(cudaFree(dev_data)); delete[] data; return 0; }
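For reference, the discriminant that func evaluates in both versions of this pair is the log of an unnormalized Gaussian class likelihood. Writing AVG[i] as $\mu_i$, COV_INV[i] as $\Sigma_i^{-1}$ and DETS[i] as $\det\Sigma_i$, each pixel $p$ receives the score

$$g_i(p) = -(p - \mu_i)^{\mathsf{T}}\,\Sigma_i^{-1}\,(p - \mu_i) - \ln\lvert\det\Sigma_i\rvert,$$

and getClass assigns the class $\arg\max_i g_i(p)$, which the kernel stores in the alpha channel dst[x + y * w].w.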
40553f3dde97dbd81c5c5017be423e3ae9fe038c.hip
// !!! This is a file automatically generated by hipify!!! #include "hip/hip_runtime.h" /* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* 2D Convolution: C = A (*) B, A is the 5x5 kernel matrix, B is the image matrix. * Host code. 
*/ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project //#include <cutil.h> // includes, kernels #include <2Dconvolution_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int); Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix(int height, int width, int init); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); int ReadFile(Matrix* M, char* file_name); void WriteFile(Matrix M, char* file_name); void FreeDeviceMatrix(Matrix* M); void FreeMatrix(Matrix* M); void ConvolutionOnDevice(const Matrix A, const Matrix B, Matrix C); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { Matrix A; Matrix B; Matrix C; srand(2012); if(argc != 5 && argc != 4) { // Allocate and initialize the matrices A = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 1); B = AllocateMatrix((rand() % 1024) + 1, (rand() % 1024) + 1, 1); C = AllocateMatrix(B.height, B.width, 0); } else { // Allocate and read in matrices from disk int* params = NULL; unsigned int data_read = 0; cutReadFilei(argv[1], &params, &data_read, true); if(data_read != 2) { printf("Error reading parameter file\n"); cutFree(params); return 1; } A = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 0); B = AllocateMatrix(params[0], params[1], 0); C = AllocateMatrix(params[0], params[1], 0); cutFree(params); (void)ReadFile(&A, argv[2]); (void)ReadFile(&B, argv[3]); } // Convolution on the device ConvolutionOnDevice(A, B, C); // compute the matrix multiplication on the CPU for comparison Matrix reference = AllocateMatrix(C.height, C.width, 0); computeGold(reference.elements, A.elements, B.elements, B.height, B.width); // in this case check if the result is equivalent to the expected soluion CUTBoolean res = cutComparefe(reference.elements, C.elements, C.width * C.height, 0.0001f); printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED"); if(argc == 5) { WriteFile(C, argv[4]); } else if(argc == 2) { WriteFile(C, argv[1]); } // Free matrices FreeMatrix(&A); FreeMatrix(&B); FreeMatrix(&C); return 0; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// // __device__ __constant__ float* Ad; void ConvolutionOnDevice(const Matrix A, const Matrix B, Matrix C) { //Load A and B to the device //Matrix Ad = AllocateDeviceMatrix(A); //CopyToDeviceMatrix(Ad, A); hipMalloc((void**)&Ad, KERNEL_SIZE*KERNEL_SIZE*sizeof(float)); hipMemcpyToSymbol(Ad, A.elements, KERNEL_SIZE*KERNEL_SIZE*sizeof(float)); Matrix Bd = AllocateDeviceMatrix(B); CopyToDeviceMatrix(Bd, B); //Allocate C on the device Matrix Cd = AllocateDeviceMatrix(C); CopyToDeviceMatrix(Cd, C); // Clear memory //Setup the execution configuration //Launch the device computation threads! 
int blocks = B.height; int threads = B.width; hipEvent_t start, stop; float elapsedTime=0.0f; hipEventCreate(&start); hipEventCreate(&stop); hipEventRecord(start, 0); hipLaunchKernelGGL(( ConvolutionKernel), dim3(blocks), dim3(threads), 0, 0, Bd,Cd); hipEventRecord(stop, 0); hipEventSynchronize(stop); hipEventElapsedTime(&elapsedTime, start, stop); printf("The execution time of GPU is :%f\n",elapsedTime); //Read C from the device CopyFromDeviceMatrix(C, Cd); FreeDeviceMatrix(&Bd); FreeDeviceMatrix(&Cd); } //Allocate a device matrix of same size as M. Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); hipMalloc((void**)&Mdevice.elements, size); return Mdevice; } //Allocate a device matrix of dimensions height*width //If init == 0, initialize to all zeroes. //If init == 1, perform random initialization. //If init == 2, initialize matrix parameters, but do not allocate memory Matrix AllocateMatrix(int height, int width, int init) { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; //don't allocate memory on option 2 if(init == 2) return M; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < M.height * M.width; i++) { M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX); if(rand() % 2) M.elements[i] = - M.elements[i]; } return M; } //Copy a host matrix to a device matrix. void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; hipMemcpy(Mdevice.elements, Mhost.elements, size,hipMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); hipMemcpy(Mhost.elements, Mdevice.elements, size,hipMemcpyDeviceToHost); } // Free a device matrix. void FreeDeviceMatrix(Matrix* M) { hipFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { free(M->elements); M->elements = NULL; } // Read a 16x16 floating point matrix in from file int ReadFile(Matrix* M, char* file_name) { unsigned int data_read = M->height * M->width; cutReadFilef(file_name, &(M->elements), &data_read, true); return data_read; } // Write a 16x16 floating point matrix to file void WriteFile(Matrix M, char* file_name) { cutWriteFilef(file_name, M.elements, M.width*M.height, 0.0001f); }
40553f3dde97dbd81c5c5017be423e3ae9fe038c.cu
/* * Copyright 1993-2006 NVIDIA Corporation. All rights reserved. * * NOTICE TO USER: * * This source code is subject to NVIDIA ownership rights under U.S. and * international Copyright laws. * * This software and the information contained herein is PROPRIETARY and * CONFIDENTIAL to NVIDIA and is being provided under the terms and * conditions of a Non-Disclosure Agreement. Any reproduction or * disclosure to any third party without the express written consent of * NVIDIA is prohibited. * * NVIDIA MAKES NO REPRESENTATION ABOUT THE SUITABILITY OF THIS SOURCE * CODE FOR ANY PURPOSE. IT IS PROVIDED "AS IS" WITHOUT EXPRESS OR * IMPLIED WARRANTY OF ANY KIND. NVIDIA DISCLAIMS ALL WARRANTIES WITH * REGARD TO THIS SOURCE CODE, INCLUDING ALL IMPLIED WARRANTIES OF * MERCHANTABILITY, NONINFRINGEMENT, AND FITNESS FOR A PARTICULAR PURPOSE. * IN NO EVENT SHALL NVIDIA BE LIABLE FOR ANY SPECIAL, INDIRECT, INCIDENTAL, * OR CONSEQUENTIAL DAMAGES, OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS * OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE * OR PERFORMANCE OF THIS SOURCE CODE. * * U.S. Government End Users. This source code is a "commercial item" as * that term is defined at 48 C.F.R. 2.101 (OCT 1995), consisting of * "commercial computer software" and "commercial computer software * documentation" as such terms are used in 48 C.F.R. 12.212 (SEPT 1995) * and is provided to the U.S. Government only as a commercial end item. * Consistent with 48 C.F.R.12.212 and 48 C.F.R. 227.7202-1 through * 227.7202-4 (JUNE 1995), all U.S. Government End Users acquire the * source code with only those rights set forth herein. */ /* 2D Convolution: C = A (*) B, A is the 5x5 kernel matrix, B is the image matrix. * Host code. 
*/ // includes, system #include <stdlib.h> #include <stdio.h> #include <string.h> #include <math.h> // includes, project //#include <cutil.h> // includes, kernels #include <2Dconvolution_kernel.cu> //////////////////////////////////////////////////////////////////////////////// // declarations, forward extern "C" void computeGold(float*, const float*, const float*, unsigned int, unsigned int); Matrix AllocateDeviceMatrix(const Matrix M); Matrix AllocateMatrix(int height, int width, int init); void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost); void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice); int ReadFile(Matrix* M, char* file_name); void WriteFile(Matrix M, char* file_name); void FreeDeviceMatrix(Matrix* M); void FreeMatrix(Matrix* M); void ConvolutionOnDevice(const Matrix A, const Matrix B, Matrix C); //////////////////////////////////////////////////////////////////////////////// // Program main //////////////////////////////////////////////////////////////////////////////// int main(int argc, char** argv) { Matrix A; Matrix B; Matrix C; srand(2012); if(argc != 5 && argc != 4) { // Allocate and initialize the matrices A = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 1); B = AllocateMatrix((rand() % 1024) + 1, (rand() % 1024) + 1, 1); C = AllocateMatrix(B.height, B.width, 0); } else { // Allocate and read in matrices from disk int* params = NULL; unsigned int data_read = 0; cutReadFilei(argv[1], &params, &data_read, true); if(data_read != 2) { printf("Error reading parameter file\n"); cutFree(params); return 1; } A = AllocateMatrix(KERNEL_SIZE, KERNEL_SIZE, 0); B = AllocateMatrix(params[0], params[1], 0); C = AllocateMatrix(params[0], params[1], 0); cutFree(params); (void)ReadFile(&A, argv[2]); (void)ReadFile(&B, argv[3]); } // Convolution on the device ConvolutionOnDevice(A, B, C); // compute the matrix multiplication on the CPU for comparison Matrix reference = AllocateMatrix(C.height, C.width, 0); computeGold(reference.elements, A.elements, B.elements, B.height, B.width); // in this case check if the result is equivalent to the expected soluion CUTBoolean res = cutComparefe(reference.elements, C.elements, C.width * C.height, 0.0001f); printf("Test %s\n", (1 == res) ? "PASSED" : "FAILED"); if(argc == 5) { WriteFile(C, argv[4]); } else if(argc == 2) { WriteFile(C, argv[1]); } // Free matrices FreeMatrix(&A); FreeMatrix(&B); FreeMatrix(&C); return 0; } //////////////////////////////////////////////////////////////////////////////// //! Run a simple test for CUDA //////////////////////////////////////////////////////////////////////////////// // __device__ __constant__ float* Ad; void ConvolutionOnDevice(const Matrix A, const Matrix B, Matrix C) { //Load A and B to the device //Matrix Ad = AllocateDeviceMatrix(A); //CopyToDeviceMatrix(Ad, A); cudaMalloc((void**)&Ad, KERNEL_SIZE*KERNEL_SIZE*sizeof(float)); cudaMemcpyToSymbol(Ad, A.elements, KERNEL_SIZE*KERNEL_SIZE*sizeof(float)); Matrix Bd = AllocateDeviceMatrix(B); CopyToDeviceMatrix(Bd, B); //Allocate C on the device Matrix Cd = AllocateDeviceMatrix(C); CopyToDeviceMatrix(Cd, C); // Clear memory //Setup the execution configuration //Launch the device computation threads! 
int blocks = B.height; int threads = B.width; cudaEvent_t start, stop; float elapsedTime=0.0f; cudaEventCreate(&start); cudaEventCreate(&stop); cudaEventRecord(start, 0); ConvolutionKernel<<<blocks, threads>>>(Bd,Cd); cudaEventRecord(stop, 0); cudaEventSynchronize(stop); cudaEventElapsedTime(&elapsedTime, start, stop); printf("The execution time of GPU is :%f\n",elapsedTime); //Read C from the device CopyFromDeviceMatrix(C, Cd); FreeDeviceMatrix(&Bd); FreeDeviceMatrix(&Cd); } //Allocate a device matrix of same size as M. Matrix AllocateDeviceMatrix(const Matrix M) { Matrix Mdevice = M; int size = M.width * M.height * sizeof(float); cudaMalloc((void**)&Mdevice.elements, size); return Mdevice; } //Allocate a device matrix of dimensions height*width //If init == 0, initialize to all zeroes. //If init == 1, perform random initialization. //If init == 2, initialize matrix parameters, but do not allocate memory Matrix AllocateMatrix(int height, int width, int init) { Matrix M; M.width = M.pitch = width; M.height = height; int size = M.width * M.height; M.elements = NULL; //don't allocate memory on option 2 if(init == 2) return M; M.elements = (float*) malloc(size*sizeof(float)); for(unsigned int i = 0; i < M.height * M.width; i++) { M.elements[i] = (init == 0) ? (0.0f) : (rand() / (float)RAND_MAX); if(rand() % 2) M.elements[i] = - M.elements[i]; } return M; } //Copy a host matrix to a device matrix. void CopyToDeviceMatrix(Matrix Mdevice, const Matrix Mhost) { int size = Mhost.width * Mhost.height * sizeof(float); Mdevice.height = Mhost.height; Mdevice.width = Mhost.width; Mdevice.pitch = Mhost.pitch; cudaMemcpy(Mdevice.elements, Mhost.elements, size,cudaMemcpyHostToDevice); } // Copy a device matrix to a host matrix. void CopyFromDeviceMatrix(Matrix Mhost, const Matrix Mdevice) { int size = Mdevice.width * Mdevice.height * sizeof(float); cudaMemcpy(Mhost.elements, Mdevice.elements, size,cudaMemcpyDeviceToHost); } // Free a device matrix. void FreeDeviceMatrix(Matrix* M) { cudaFree(M->elements); M->elements = NULL; } // Free a host Matrix void FreeMatrix(Matrix* M) { free(M->elements); M->elements = NULL; } // Read a 16x16 floating point matrix in from file int ReadFile(Matrix* M, char* file_name) { unsigned int data_read = M->height * M->width; cutReadFilef(file_name, &(M->elements), &data_read, true); return data_read; } // Write a 16x16 floating point matrix to file void WriteFile(Matrix M, char* file_name) { cutWriteFilef(file_name, M.elements, M.width*M.height, 0.0001f); }
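Both sides of this pair stage the 5x5 filter with cudaMemcpyToSymbol / hipMemcpyToSymbol into a symbol Ad whose declaration lives in the included 2Dconvolution_kernel.cu, which is not part of this pair (the declaration in this file is commented out). As a standalone illustration of the constant-memory pattern only, here is a minimal CUDA sketch; the array name c_kernel and the toy kernel are hypothetical and are not taken from that missing header.

#include <cstdio>
#include <cuda_runtime.h>

#define KERNEL_SIZE 5

// The filter coefficients live in constant memory and are visible to every kernel.
__constant__ float c_kernel[KERNEL_SIZE * KERNEL_SIZE];

// Toy kernel: scale each element by the centre tap of the filter.
__global__ void scaleByCenterTap(float* data, int n)
{
    int i = blockIdx.x * blockDim.x + threadIdx.x;
    if (i < n) data[i] *= c_kernel[(KERNEL_SIZE * KERNEL_SIZE) / 2];
}

int main()
{
    float h_kernel[KERNEL_SIZE * KERNEL_SIZE];
    for (int i = 0; i < KERNEL_SIZE * KERNEL_SIZE; ++i) h_kernel[i] = 0.5f;

    // No cudaMalloc is needed for a __constant__ array: copy straight to the symbol.
    cudaMemcpyToSymbol(c_kernel, h_kernel, sizeof(h_kernel));

    const int n = 8;
    float h_data[n];
    for (int i = 0; i < n; ++i) h_data[i] = float(i);

    float* d_data = nullptr;
    cudaMalloc(&d_data, n * sizeof(float));
    cudaMemcpy(d_data, h_data, n * sizeof(float), cudaMemcpyHostToDevice);

    scaleByCenterTap<<<1, 32>>>(d_data, n);

    cudaMemcpy(h_data, d_data, n * sizeof(float), cudaMemcpyDeviceToHost);
    for (int i = 0; i < n; ++i) printf("%f\n", h_data[i]);

    cudaFree(d_data);
    return 0;
}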
e2d16cf78b5c6a8b0fbd0fa38054eb5efe36a066.hip
// !!! This is a file automatically generated by hipify!!! /* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <strings/count_matches.hpp> #include <strings/regex/regex_program_impl.h> #include <strings/regex/utilities.cuh> #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/strings_column_factories.cuh> #include <cudf/strings/split/split_re.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/utilities/default_stream.hpp> #include <rmm/cuda_stream_view.hpp> #include <thrust/distance.h> #include <thrust/functional.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/pair.h> #include <thrust/transform_reduce.h> #include <thrust/transform_scan.h> namespace cudf { namespace strings { namespace detail { namespace { using string_index_pair = thrust::pair<char const*, size_type>; enum class split_direction { FORWARD, ///< for split logic BACKWARD ///< for rsplit logic }; /** * @brief Identify the tokens from the `idx'th` string element of `d_strings`. * * Each string's tokens are stored in the `d_tokens` vector. * The `d_token_offsets` specifies the output position within `d_tokens` * for each string. 
*/ struct token_reader_fn { column_device_view const d_strings; split_direction const direction; size_type const* d_token_offsets; string_index_pair* d_tokens; __device__ void operator()(size_type const idx, reprog_device const prog, int32_t const prog_idx) { if (d_strings.is_null(idx)) { return; } auto const d_str = d_strings.element<string_view>(idx); auto const nchars = d_str.length(); auto const token_offset = d_token_offsets[idx]; auto const token_count = d_token_offsets[idx + 1] - token_offset; auto const d_result = d_tokens + token_offset; // store tokens here size_type token_idx = 0; auto itr = d_str.begin(); auto last_pos = itr; while (itr.position() <= nchars) { auto const match = prog.find(prog_idx, d_str, itr); if (!match) { break; } auto const start_pos = thrust::get<0>(match_positions_to_bytes(*match, d_str, last_pos)); // get the token (characters just before this match) auto const token = string_index_pair{d_str.data() + last_pos.byte_offset(), start_pos - last_pos.byte_offset()}; // store it if we have space if (token_idx < token_count - 1) { d_result[token_idx++] = token; } else { if (direction == split_direction::FORWARD) { break; } // we are done for (auto l = 0; l < token_idx - 1; ++l) { d_result[l] = d_result[l + 1]; // shift left } d_result[token_idx - 1] = token; } // setup for next match last_pos += (match->second - last_pos.position()); itr = last_pos + (match->first == match->second); } // set the last token to the remainder of the string d_result[token_idx] = string_index_pair{d_str.data() + last_pos.byte_offset(), d_str.size_bytes() - last_pos.byte_offset()}; if (direction == split_direction::BACKWARD) { // update first entry -- this happens when max_tokens is hit before the end of the string auto const first_offset = d_result[0].first ? static_cast<size_type>(thrust::distance(d_str.data(), d_result[0].first)) : 0; if (first_offset) { d_result[0] = string_index_pair{d_str.data(), first_offset + d_result[0].second}; } } } }; /** * @brief Call regex to split each input string into tokens. * * This will also convert the `offsets` values from counts to offsets. * * @param d_strings Strings to split * @param d_prog Regex to evaluate against each string * @param direction Whether tokens are generated forwards or backwards. * @param max_tokens The maximum number of tokens for each split. * @param offsets The number of matches on input. * The offsets for each token in each string on output. * @param stream CUDA stream used for kernel launches. */ rmm::device_uvector<string_index_pair> generate_tokens(column_device_view const& d_strings, reprog_device& d_prog, split_direction direction, size_type maxsplit, mutable_column_view& offsets, rmm::cuda_stream_view stream) { auto const strings_count = d_strings.size(); auto const max_tokens = maxsplit > 0 ? maxsplit : std::numeric_limits<size_type>::max(); auto const begin = thrust::make_counting_iterator<size_type>(0); auto const end = thrust::make_counting_iterator<size_type>(strings_count); auto const d_offsets = offsets.data<size_type>(); // convert match counts to token offsets auto map_fn = [d_strings, d_offsets, max_tokens] __device__(auto idx) { return d_strings.is_null(idx) ? 
0 : ::min(d_offsets[idx], max_tokens) + 1; }; thrust::transform_exclusive_scan( rmm::exec_policy(stream), begin, end + 1, d_offsets, map_fn, 0, thrust::plus<size_type>{}); // the last offset entry is the total number of tokens to be generated auto const total_tokens = cudf::detail::get_value<size_type>(offsets, strings_count, stream); rmm::device_uvector<string_index_pair> tokens(total_tokens, stream); if (total_tokens == 0) { return tokens; } launch_for_each_kernel(token_reader_fn{d_strings, direction, d_offsets, tokens.data()}, d_prog, d_strings.size(), stream); return tokens; } /** * @brief Returns string pair for the specified column for each string in `d_strings` * * This is used to build the table result of a split. * Null is returned if the row is null or if the `column_index` is larger * than the token count for that string. */ struct tokens_transform_fn { column_device_view const d_strings; string_index_pair const* d_tokens; size_type const* d_token_offsets; size_type const column_index; __device__ string_index_pair operator()(size_type idx) const { auto const offset = d_token_offsets[idx]; auto const token_count = d_token_offsets[idx + 1] - offset; return (column_index >= token_count) || d_strings.is_null(idx) ? string_index_pair{nullptr, 0} : d_tokens[offset + column_index]; } }; std::unique_ptr<table> split_re(strings_column_view const& input, regex_program const& prog, split_direction direction, size_type maxsplit, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(!prog.pattern().empty(), "Parameter pattern must not be empty"); auto const strings_count = input.size(); std::vector<std::unique_ptr<column>> results; if (strings_count == 0) { results.push_back(make_empty_column(type_id::STRING)); return std::make_unique<table>(std::move(results)); } // create device object from regex_program auto d_prog = regex_device_builder::create_prog_device(prog, stream); auto d_strings = column_device_view::create(input.parent(), stream); // count the number of delimiters matched in each string auto offsets = count_matches( *d_strings, *d_prog, strings_count + 1, stream, rmm::mr::get_current_device_resource()); auto offsets_view = offsets->mutable_view(); auto d_offsets = offsets_view.data<size_type>(); // get the split tokens from the input column; this also converts the counts into offsets auto tokens = generate_tokens(*d_strings, *d_prog, direction, maxsplit, offsets_view, stream); // the output column count is the maximum number of tokens generated for any input string auto const columns_count = thrust::transform_reduce( rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(strings_count), [d_offsets] __device__(auto const idx) -> size_type { return d_offsets[idx + 1] - d_offsets[idx]; }, 0, thrust::maximum<size_type>{}); // boundary case: if no columns, return one all-null column (custrings issue #119) if (columns_count == 0) { results.push_back(std::make_unique<column>( data_type{type_id::STRING}, strings_count, rmm::device_buffer{0, stream, mr}, // no data cudf::detail::create_null_mask(strings_count, mask_state::ALL_NULL, stream, mr), strings_count)); return std::make_unique<table>(std::move(results)); } // convert the tokens into multiple strings columns auto make_strings_lambda = [&](size_type column_index) { // returns appropriate token for each row/column auto indices_itr = cudf::detail::make_counting_transform_iterator( 0, tokens_transform_fn{*d_strings, tokens.data(), d_offsets, 
column_index}); return make_strings_column(indices_itr, indices_itr + strings_count, stream, mr); }; // build a vector of columns results.resize(columns_count); std::transform(thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(columns_count), results.begin(), make_strings_lambda); return std::make_unique<table>(std::move(results)); } std::unique_ptr<column> split_record_re(strings_column_view const& input, regex_program const& prog, split_direction direction, size_type maxsplit, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(!prog.pattern().empty(), "Parameter pattern must not be empty"); auto const strings_count = input.size(); // create device object from regex_program auto d_prog = regex_device_builder::create_prog_device(prog, stream); auto d_strings = column_device_view::create(input.parent(), stream); // count the number of delimiters matched in each string auto offsets = count_matches(*d_strings, *d_prog, strings_count + 1, stream, mr); auto offsets_view = offsets->mutable_view(); // get the split tokens from the input column; this also converts the counts into offsets auto tokens = generate_tokens(*d_strings, *d_prog, direction, maxsplit, offsets_view, stream); // convert the tokens into one big strings column auto strings_output = make_strings_column(tokens.begin(), tokens.end(), stream, mr); // create a lists column using the offsets and the strings columns return make_lists_column(strings_count, std::move(offsets), std::move(strings_output), input.null_count(), copy_bitmask(input.parent(), stream, mr), stream, mr); } } // namespace std::unique_ptr<table> split_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return split_re(input, prog, split_direction::FORWARD, maxsplit, stream, mr); } std::unique_ptr<column> split_record_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return split_record_re(input, prog, split_direction::FORWARD, maxsplit, stream, mr); } std::unique_ptr<table> rsplit_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return split_re(input, prog, split_direction::BACKWARD, maxsplit, stream, mr); } std::unique_ptr<column> rsplit_record_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return split_record_re(input, prog, split_direction::BACKWARD, maxsplit, stream, mr); } } // namespace detail // external APIs std::unique_ptr<table> split_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::split_re(input, prog, maxsplit, cudf::get_default_stream(), mr); } std::unique_ptr<column> split_record_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::split_record_re(input, prog, maxsplit, cudf::get_default_stream(), mr); } std::unique_ptr<table> rsplit_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::rsplit_re(input, prog, maxsplit, cudf::get_default_stream(), mr); } 
std::unique_ptr<column> rsplit_record_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::rsplit_record_re(input, prog, maxsplit, cudf::get_default_stream(), mr); } } // namespace strings } // namespace cudf
e2d16cf78b5c6a8b0fbd0fa38054eb5efe36a066.cu
/* * Copyright (c) 2022-2023, NVIDIA CORPORATION. * * Licensed under the Apache License, Version 2.0 (the "License"); * you may not use this file except in compliance with the License. * You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ #include <strings/count_matches.hpp> #include <strings/regex/regex_program_impl.h> #include <strings/regex/utilities.cuh> #include <cudf/column/column.hpp> #include <cudf/column/column_device_view.cuh> #include <cudf/column/column_factories.hpp> #include <cudf/detail/get_value.cuh> #include <cudf/detail/iterator.cuh> #include <cudf/detail/nvtx/ranges.hpp> #include <cudf/strings/detail/strings_column_factories.cuh> #include <cudf/strings/split/split_re.hpp> #include <cudf/strings/string_view.cuh> #include <cudf/utilities/default_stream.hpp> #include <rmm/cuda_stream_view.hpp> #include <thrust/distance.h> #include <thrust/functional.h> #include <thrust/iterator/counting_iterator.h> #include <thrust/pair.h> #include <thrust/transform_reduce.h> #include <thrust/transform_scan.h> namespace cudf { namespace strings { namespace detail { namespace { using string_index_pair = thrust::pair<char const*, size_type>; enum class split_direction { FORWARD, ///< for split logic BACKWARD ///< for rsplit logic }; /** * @brief Identify the tokens from the `idx'th` string element of `d_strings`. * * Each string's tokens are stored in the `d_tokens` vector. * The `d_token_offsets` specifies the output position within `d_tokens` * for each string. 
*/ struct token_reader_fn { column_device_view const d_strings; split_direction const direction; size_type const* d_token_offsets; string_index_pair* d_tokens; __device__ void operator()(size_type const idx, reprog_device const prog, int32_t const prog_idx) { if (d_strings.is_null(idx)) { return; } auto const d_str = d_strings.element<string_view>(idx); auto const nchars = d_str.length(); auto const token_offset = d_token_offsets[idx]; auto const token_count = d_token_offsets[idx + 1] - token_offset; auto const d_result = d_tokens + token_offset; // store tokens here size_type token_idx = 0; auto itr = d_str.begin(); auto last_pos = itr; while (itr.position() <= nchars) { auto const match = prog.find(prog_idx, d_str, itr); if (!match) { break; } auto const start_pos = thrust::get<0>(match_positions_to_bytes(*match, d_str, last_pos)); // get the token (characters just before this match) auto const token = string_index_pair{d_str.data() + last_pos.byte_offset(), start_pos - last_pos.byte_offset()}; // store it if we have space if (token_idx < token_count - 1) { d_result[token_idx++] = token; } else { if (direction == split_direction::FORWARD) { break; } // we are done for (auto l = 0; l < token_idx - 1; ++l) { d_result[l] = d_result[l + 1]; // shift left } d_result[token_idx - 1] = token; } // setup for next match last_pos += (match->second - last_pos.position()); itr = last_pos + (match->first == match->second); } // set the last token to the remainder of the string d_result[token_idx] = string_index_pair{d_str.data() + last_pos.byte_offset(), d_str.size_bytes() - last_pos.byte_offset()}; if (direction == split_direction::BACKWARD) { // update first entry -- this happens when max_tokens is hit before the end of the string auto const first_offset = d_result[0].first ? static_cast<size_type>(thrust::distance(d_str.data(), d_result[0].first)) : 0; if (first_offset) { d_result[0] = string_index_pair{d_str.data(), first_offset + d_result[0].second}; } } } }; /** * @brief Call regex to split each input string into tokens. * * This will also convert the `offsets` values from counts to offsets. * * @param d_strings Strings to split * @param d_prog Regex to evaluate against each string * @param direction Whether tokens are generated forwards or backwards. * @param max_tokens The maximum number of tokens for each split. * @param offsets The number of matches on input. * The offsets for each token in each string on output. * @param stream CUDA stream used for kernel launches. */ rmm::device_uvector<string_index_pair> generate_tokens(column_device_view const& d_strings, reprog_device& d_prog, split_direction direction, size_type maxsplit, mutable_column_view& offsets, rmm::cuda_stream_view stream) { auto const strings_count = d_strings.size(); auto const max_tokens = maxsplit > 0 ? maxsplit : std::numeric_limits<size_type>::max(); auto const begin = thrust::make_counting_iterator<size_type>(0); auto const end = thrust::make_counting_iterator<size_type>(strings_count); auto const d_offsets = offsets.data<size_type>(); // convert match counts to token offsets auto map_fn = [d_strings, d_offsets, max_tokens] __device__(auto idx) { return d_strings.is_null(idx) ? 
0 : std::min(d_offsets[idx], max_tokens) + 1; }; thrust::transform_exclusive_scan( rmm::exec_policy(stream), begin, end + 1, d_offsets, map_fn, 0, thrust::plus<size_type>{}); // the last offset entry is the total number of tokens to be generated auto const total_tokens = cudf::detail::get_value<size_type>(offsets, strings_count, stream); rmm::device_uvector<string_index_pair> tokens(total_tokens, stream); if (total_tokens == 0) { return tokens; } launch_for_each_kernel(token_reader_fn{d_strings, direction, d_offsets, tokens.data()}, d_prog, d_strings.size(), stream); return tokens; } /** * @brief Returns string pair for the specified column for each string in `d_strings` * * This is used to build the table result of a split. * Null is returned if the row is null or if the `column_index` is larger * than the token count for that string. */ struct tokens_transform_fn { column_device_view const d_strings; string_index_pair const* d_tokens; size_type const* d_token_offsets; size_type const column_index; __device__ string_index_pair operator()(size_type idx) const { auto const offset = d_token_offsets[idx]; auto const token_count = d_token_offsets[idx + 1] - offset; return (column_index >= token_count) || d_strings.is_null(idx) ? string_index_pair{nullptr, 0} : d_tokens[offset + column_index]; } }; std::unique_ptr<table> split_re(strings_column_view const& input, regex_program const& prog, split_direction direction, size_type maxsplit, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(!prog.pattern().empty(), "Parameter pattern must not be empty"); auto const strings_count = input.size(); std::vector<std::unique_ptr<column>> results; if (strings_count == 0) { results.push_back(make_empty_column(type_id::STRING)); return std::make_unique<table>(std::move(results)); } // create device object from regex_program auto d_prog = regex_device_builder::create_prog_device(prog, stream); auto d_strings = column_device_view::create(input.parent(), stream); // count the number of delimiters matched in each string auto offsets = count_matches( *d_strings, *d_prog, strings_count + 1, stream, rmm::mr::get_current_device_resource()); auto offsets_view = offsets->mutable_view(); auto d_offsets = offsets_view.data<size_type>(); // get the split tokens from the input column; this also converts the counts into offsets auto tokens = generate_tokens(*d_strings, *d_prog, direction, maxsplit, offsets_view, stream); // the output column count is the maximum number of tokens generated for any input string auto const columns_count = thrust::transform_reduce( rmm::exec_policy(stream), thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(strings_count), [d_offsets] __device__(auto const idx) -> size_type { return d_offsets[idx + 1] - d_offsets[idx]; }, 0, thrust::maximum<size_type>{}); // boundary case: if no columns, return one all-null column (custrings issue #119) if (columns_count == 0) { results.push_back(std::make_unique<column>( data_type{type_id::STRING}, strings_count, rmm::device_buffer{0, stream, mr}, // no data cudf::detail::create_null_mask(strings_count, mask_state::ALL_NULL, stream, mr), strings_count)); return std::make_unique<table>(std::move(results)); } // convert the tokens into multiple strings columns auto make_strings_lambda = [&](size_type column_index) { // returns appropriate token for each row/column auto indices_itr = cudf::detail::make_counting_transform_iterator( 0, tokens_transform_fn{*d_strings, tokens.data(), d_offsets, 
column_index}); return make_strings_column(indices_itr, indices_itr + strings_count, stream, mr); }; // build a vector of columns results.resize(columns_count); std::transform(thrust::make_counting_iterator<size_type>(0), thrust::make_counting_iterator<size_type>(columns_count), results.begin(), make_strings_lambda); return std::make_unique<table>(std::move(results)); } std::unique_ptr<column> split_record_re(strings_column_view const& input, regex_program const& prog, split_direction direction, size_type maxsplit, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { CUDF_EXPECTS(!prog.pattern().empty(), "Parameter pattern must not be empty"); auto const strings_count = input.size(); // create device object from regex_program auto d_prog = regex_device_builder::create_prog_device(prog, stream); auto d_strings = column_device_view::create(input.parent(), stream); // count the number of delimiters matched in each string auto offsets = count_matches(*d_strings, *d_prog, strings_count + 1, stream, mr); auto offsets_view = offsets->mutable_view(); // get the split tokens from the input column; this also converts the counts into offsets auto tokens = generate_tokens(*d_strings, *d_prog, direction, maxsplit, offsets_view, stream); // convert the tokens into one big strings column auto strings_output = make_strings_column(tokens.begin(), tokens.end(), stream, mr); // create a lists column using the offsets and the strings columns return make_lists_column(strings_count, std::move(offsets), std::move(strings_output), input.null_count(), copy_bitmask(input.parent(), stream, mr), stream, mr); } } // namespace std::unique_ptr<table> split_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return split_re(input, prog, split_direction::FORWARD, maxsplit, stream, mr); } std::unique_ptr<column> split_record_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return split_record_re(input, prog, split_direction::FORWARD, maxsplit, stream, mr); } std::unique_ptr<table> rsplit_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return split_re(input, prog, split_direction::BACKWARD, maxsplit, stream, mr); } std::unique_ptr<column> rsplit_record_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, rmm::cuda_stream_view stream, rmm::mr::device_memory_resource* mr) { return split_record_re(input, prog, split_direction::BACKWARD, maxsplit, stream, mr); } } // namespace detail // external APIs std::unique_ptr<table> split_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::split_re(input, prog, maxsplit, cudf::get_default_stream(), mr); } std::unique_ptr<column> split_record_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::split_record_re(input, prog, maxsplit, cudf::get_default_stream(), mr); } std::unique_ptr<table> rsplit_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::rsplit_re(input, prog, maxsplit, cudf::get_default_stream(), mr); } 
std::unique_ptr<column> rsplit_record_re(strings_column_view const& input, regex_program const& prog, size_type maxsplit, rmm::mr::device_memory_resource* mr) { CUDF_FUNC_RANGE(); return detail::rsplit_record_re(input, prog, maxsplit, cudf::get_default_stream(), mr); } } // namespace strings } // namespace cudf
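The external APIs at the end of this pair take a strings column, a pre-compiled regex_program, a maxsplit limit (any value <= 0 means "no limit", per generate_tokens), and a device memory resource. A minimal caller sketch follows, assuming cudf's test-utility column wrapper and the regex_program::create factory from the same library version; the header paths and the wrapper are assumptions and are not shown in this pair.

#include <cudf/strings/regex/regex_program.hpp>
#include <cudf/strings/split/split_re.hpp>
#include <cudf/strings/strings_column_view.hpp>
#include <cudf_test/column_wrapper.hpp>
#include <rmm/mr/device/per_device_resource.hpp>

int main()
{
  // Hypothetical two-row input column built with cudf's test utilities.
  cudf::test::strings_column_wrapper input({"a1b22c", "d333e"});
  cudf::strings_column_view view(input);

  // Compile the delimiter pattern once; runs of digits separate the tokens.
  auto prog = cudf::strings::regex_program::create("[0-9]+");

  // Table result: one strings column per token position, with nulls where a row has fewer tokens.
  auto cols = cudf::strings::split_re(view, *prog, -1,
                                      rmm::mr::get_current_device_resource());

  // Lists result: one list of tokens per input row.
  auto lists = cudf::strings::split_record_re(view, *prog, -1,
                                              rmm::mr::get_current_device_resource());
  return 0;
}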